1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2014 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33 
34 const char i40e_driver_name[] = "i40e";
35 static const char i40e_driver_string[] =
36 			"Intel(R) Ethernet Connection XL710 Network Driver";
37 
38 #define DRV_KERN "-k"
39 
40 #define DRV_VERSION_MAJOR 1
41 #define DRV_VERSION_MINOR 2
42 #define DRV_VERSION_BUILD 6
43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 	     __stringify(DRV_VERSION_MINOR) "." \
45 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
46 const char i40e_driver_version_str[] = DRV_VERSION;
47 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48 
49 /* a bit of forward declarations */
50 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
52 static int i40e_add_vsi(struct i40e_vsi *vsi);
53 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60 
61 /* i40e_pci_tbl - PCI Device ID Table
62  *
63  * Last entry must be all 0s
64  *
65  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66  *   Class, Class Mask, private data (not used) }
67  */
68 static const struct pci_device_id i40e_pci_tbl[] = {
69 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
78 	/* required last entry */
79 	{0, }
80 };
81 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
82 
83 #define I40E_MAX_VF_COUNT 128
84 static int debug = -1;
85 module_param(debug, int, 0);
86 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
87 
88 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
89 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
90 MODULE_LICENSE("GPL");
91 MODULE_VERSION(DRV_VERSION);
92 
93 /**
94  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
95  * @hw:   pointer to the HW structure
96  * @mem:  ptr to mem struct to fill out
97  * @size: size of memory requested
98  * @alignment: what to align the allocation to
99  **/
100 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
101 			    u64 size, u32 alignment)
102 {
103 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
104 
105 	mem->size = ALIGN(size, alignment);
106 	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
107 				      &mem->pa, GFP_KERNEL);
108 	if (!mem->va)
109 		return -ENOMEM;
110 
111 	return 0;
112 }
113 
114 /**
115  * i40e_free_dma_mem_d - OS specific memory free for shared code
116  * @hw:   pointer to the HW structure
117  * @mem:  ptr to mem struct to free
118  **/
119 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
120 {
121 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
122 
123 	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
124 	mem->va = NULL;
125 	mem->pa = 0;
126 	mem->size = 0;
127 
128 	return 0;
129 }
130 
131 /**
132  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
133  * @hw:   pointer to the HW structure
134  * @mem:  ptr to mem struct to fill out
135  * @size: size of memory requested
136  **/
137 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
138 			     u32 size)
139 {
140 	mem->size = size;
141 	mem->va = kzalloc(size, GFP_KERNEL);
142 
143 	if (!mem->va)
144 		return -ENOMEM;
145 
146 	return 0;
147 }
148 
149 /**
150  * i40e_free_virt_mem_d - OS specific memory free for shared code
151  * @hw:   pointer to the HW structure
152  * @mem:  ptr to mem struct to free
153  **/
154 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
155 {
156 	/* it's ok to kfree a NULL pointer */
157 	kfree(mem->va);
158 	mem->va = NULL;
159 	mem->size = 0;
160 
161 	return 0;
162 }
163 
164 /**
165  * i40e_get_lump - find a lump of free generic resource
166  * @pf: board private structure
167  * @pile: the pile of resource to search
168  * @needed: the number of items needed
169  * @id: an owner id to stick on the items assigned
170  *
171  * Returns the base item index of the lump, or negative for error
172  *
173  * The search_hint trick and lack of advanced fit-finding only work
174  * because we're highly likely to have all the same size lump requests.
175  * Linear search time and any fragmentation should be minimal.
176  **/
177 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
178 			 u16 needed, u16 id)
179 {
180 	int ret = -ENOMEM;
181 	int i, j;
182 
183 	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
184 		dev_info(&pf->pdev->dev,
185 			 "param err: pile=%p needed=%d id=0x%04x\n",
186 			 pile, needed, id);
187 		return -EINVAL;
188 	}
189 
190 	/* start the linear search with an imperfect hint */
191 	i = pile->search_hint;
192 	while (i < pile->num_entries) {
193 		/* skip already allocated entries */
194 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
195 			i++;
196 			continue;
197 		}
198 
199 		/* do we have enough in this lump? */
200 		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
201 			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
202 				break;
203 		}
204 
205 		if (j == needed) {
206 			/* there was enough, so assign it to the requestor */
207 			for (j = 0; j < needed; j++)
208 				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
209 			ret = i;
210 			pile->search_hint = i + j;
211 			break;
212 		} else {
213 			/* not enough, so skip over it and continue looking */
214 			i += j;
215 		}
216 	}
217 
218 	return ret;
219 }
220 
221 /**
222  * i40e_put_lump - return a lump of generic resource
223  * @pile: the pile of resource to search
224  * @index: the base item index
225  * @id: the owner id of the items assigned
226  *
227  * Returns the count of items in the lump
228  **/
229 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
230 {
231 	int valid_id = (id | I40E_PILE_VALID_BIT);
232 	int count = 0;
233 	int i;
234 
235 	if (!pile || index >= pile->num_entries)
236 		return -EINVAL;
237 
238 	for (i = index;
239 	     i < pile->num_entries && pile->list[i] == valid_id;
240 	     i++) {
241 		pile->list[i] = 0;
242 		count++;
243 	}
244 
245 	if (count && index < pile->search_hint)
246 		pile->search_hint = index;
247 
248 	return count;
249 }
250 
251 /**
252  * i40e_service_event_schedule - Schedule the service task to wake up
253  * @pf: board private structure
254  *
255  * If not already scheduled, this puts the task into the work queue
256  **/
257 static void i40e_service_event_schedule(struct i40e_pf *pf)
258 {
259 	if (!test_bit(__I40E_DOWN, &pf->state) &&
260 	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
261 	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
262 		schedule_work(&pf->service_task);
263 }
264 
265 /**
266  * i40e_tx_timeout - Respond to a Tx Hang
267  * @netdev: network interface device structure
268  *
269  * If any port has noticed a Tx timeout, it is likely that the whole
270  * device is munged, not just the one netdev port, so go for the full
271  * reset.
272  **/
273 #ifdef I40E_FCOE
274 void i40e_tx_timeout(struct net_device *netdev)
275 #else
276 static void i40e_tx_timeout(struct net_device *netdev)
277 #endif
278 {
279 	struct i40e_netdev_priv *np = netdev_priv(netdev);
280 	struct i40e_vsi *vsi = np->vsi;
281 	struct i40e_pf *pf = vsi->back;
282 
283 	pf->tx_timeout_count++;
284 
285 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
286 		pf->tx_timeout_recovery_level = 1;
287 	pf->tx_timeout_last_recovery = jiffies;
288 	netdev_info(netdev, "tx_timeout recovery level %d\n",
289 		    pf->tx_timeout_recovery_level);
290 
291 	switch (pf->tx_timeout_recovery_level) {
292 	case 0:
293 		/* disable and re-enable queues for the VSI */
294 		if (in_interrupt()) {
295 			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
296 			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
297 		} else {
298 			i40e_vsi_reinit_locked(vsi);
299 		}
300 		break;
301 	case 1:
302 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
303 		break;
304 	case 2:
305 		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
306 		break;
307 	case 3:
308 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
309 		break;
310 	default:
311 		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
312 		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
313 		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
314 		break;
315 	}
316 	i40e_service_event_schedule(pf);
317 	pf->tx_timeout_recovery_level++;
318 }
319 
320 /**
321  * i40e_release_rx_desc - Store the new tail and head values
322  * @rx_ring: ring to bump
323  * @val: new head index
324  **/
325 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
326 {
327 	rx_ring->next_to_use = val;
328 
329 	/* Force memory writes to complete before letting h/w
330 	 * know there are new descriptors to fetch.  (Only
331 	 * applicable for weak-ordered memory model archs,
332 	 * such as IA-64).
333 	 */
334 	wmb();
335 	writel(val, rx_ring->tail);
336 }
337 
338 /**
339  * i40e_get_vsi_stats_struct - Get System Network Statistics
340  * @vsi: the VSI we care about
341  *
342  * Returns the address of the device statistics structure.
343  * The statistics are actually updated from the service task.
344  **/
345 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
346 {
347 	return &vsi->net_stats;
348 }
349 
350 /**
351  * i40e_get_netdev_stats_struct - Get statistics for netdev interface
352  * @netdev: network interface device structure
353  *
354  * Returns the address of the device statistics structure.
355  * The statistics are actually updated from the service task.
356  **/
357 #ifdef I40E_FCOE
358 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
359 					     struct net_device *netdev,
360 					     struct rtnl_link_stats64 *stats)
361 #else
362 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
363 					     struct net_device *netdev,
364 					     struct rtnl_link_stats64 *stats)
365 #endif
366 {
367 	struct i40e_netdev_priv *np = netdev_priv(netdev);
368 	struct i40e_ring *tx_ring, *rx_ring;
369 	struct i40e_vsi *vsi = np->vsi;
370 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
371 	int i;
372 
373 	if (test_bit(__I40E_DOWN, &vsi->state))
374 		return stats;
375 
376 	if (!vsi->tx_rings)
377 		return stats;
378 
379 	rcu_read_lock();
380 	for (i = 0; i < vsi->num_queue_pairs; i++) {
381 		u64 bytes, packets;
382 		unsigned int start;
383 
384 		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
385 		if (!tx_ring)
386 			continue;
387 
388 		do {
389 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
390 			packets = tx_ring->stats.packets;
391 			bytes   = tx_ring->stats.bytes;
392 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
393 
394 		stats->tx_packets += packets;
395 		stats->tx_bytes   += bytes;
396 		rx_ring = &tx_ring[1];
397 
398 		do {
399 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
400 			packets = rx_ring->stats.packets;
401 			bytes   = rx_ring->stats.bytes;
402 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
403 
404 		stats->rx_packets += packets;
405 		stats->rx_bytes   += bytes;
406 	}
407 	rcu_read_unlock();
408 
409 	/* following stats updated by i40e_watchdog_subtask() */
410 	stats->multicast	= vsi_stats->multicast;
411 	stats->tx_errors	= vsi_stats->tx_errors;
412 	stats->tx_dropped	= vsi_stats->tx_dropped;
413 	stats->rx_errors	= vsi_stats->rx_errors;
414 	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
415 	stats->rx_length_errors	= vsi_stats->rx_length_errors;
416 
417 	return stats;
418 }
419 
420 /**
421  * i40e_vsi_reset_stats - Resets all stats of the given vsi
422  * @vsi: the VSI to have its stats reset
423  **/
424 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
425 {
426 	struct rtnl_link_stats64 *ns;
427 	int i;
428 
429 	if (!vsi)
430 		return;
431 
432 	ns = i40e_get_vsi_stats_struct(vsi);
433 	memset(ns, 0, sizeof(*ns));
434 	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
435 	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
436 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
437 	if (vsi->rx_rings && vsi->rx_rings[0]) {
438 		for (i = 0; i < vsi->num_queue_pairs; i++) {
439 			memset(&vsi->rx_rings[i]->stats, 0 ,
440 			       sizeof(vsi->rx_rings[i]->stats));
441 			memset(&vsi->rx_rings[i]->rx_stats, 0 ,
442 			       sizeof(vsi->rx_rings[i]->rx_stats));
443 			memset(&vsi->tx_rings[i]->stats, 0 ,
444 			       sizeof(vsi->tx_rings[i]->stats));
445 			memset(&vsi->tx_rings[i]->tx_stats, 0,
446 			       sizeof(vsi->tx_rings[i]->tx_stats));
447 		}
448 	}
449 	vsi->stat_offsets_loaded = false;
450 }
451 
452 /**
453  * i40e_pf_reset_stats - Reset all of the stats for the given pf
454  * @pf: the PF to be reset
455  **/
456 void i40e_pf_reset_stats(struct i40e_pf *pf)
457 {
458 	int i;
459 
460 	memset(&pf->stats, 0, sizeof(pf->stats));
461 	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
462 	pf->stat_offsets_loaded = false;
463 
464 	for (i = 0; i < I40E_MAX_VEB; i++) {
465 		if (pf->veb[i]) {
466 			memset(&pf->veb[i]->stats, 0,
467 			       sizeof(pf->veb[i]->stats));
468 			memset(&pf->veb[i]->stats_offsets, 0,
469 			       sizeof(pf->veb[i]->stats_offsets));
470 			pf->veb[i]->stat_offsets_loaded = false;
471 		}
472 	}
473 }
474 
475 /**
476  * i40e_stat_update48 - read and update a 48 bit stat from the chip
477  * @hw: ptr to the hardware info
478  * @hireg: the high 32 bit reg to read
479  * @loreg: the low 32 bit reg to read
480  * @offset_loaded: has the initial offset been loaded yet
481  * @offset: ptr to current offset value
482  * @stat: ptr to the stat
483  *
484  * Since the device stats are not reset at PFReset, they likely will not
485  * be zeroed when the driver starts.  We'll save the first values read
486  * and use them as offsets to be subtracted from the raw values in order
487  * to report stats that count from zero.  In the process, we also manage
488  * the potential roll-over.
489  **/
490 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
491 			       bool offset_loaded, u64 *offset, u64 *stat)
492 {
493 	u64 new_data;
494 
495 	if (hw->device_id == I40E_DEV_ID_QEMU) {
496 		new_data = rd32(hw, loreg);
497 		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
498 	} else {
499 		new_data = rd64(hw, loreg);
500 	}
501 	if (!offset_loaded)
502 		*offset = new_data;
503 	if (likely(new_data >= *offset))
504 		*stat = new_data - *offset;
505 	else
506 		*stat = (new_data + ((u64)1 << 48)) - *offset;
507 	*stat &= 0xFFFFFFFFFFFFULL;
508 }
509 
510 /**
511  * i40e_stat_update32 - read and update a 32 bit stat from the chip
512  * @hw: ptr to the hardware info
513  * @reg: the hw reg to read
514  * @offset_loaded: has the initial offset been loaded yet
515  * @offset: ptr to current offset value
516  * @stat: ptr to the stat
517  **/
518 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
519 			       bool offset_loaded, u64 *offset, u64 *stat)
520 {
521 	u32 new_data;
522 
523 	new_data = rd32(hw, reg);
524 	if (!offset_loaded)
525 		*offset = new_data;
526 	if (likely(new_data >= *offset))
527 		*stat = (u32)(new_data - *offset);
528 	else
529 		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
530 }
531 
532 /**
533  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
534  * @vsi: the VSI to be updated
535  **/
536 void i40e_update_eth_stats(struct i40e_vsi *vsi)
537 {
538 	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
539 	struct i40e_pf *pf = vsi->back;
540 	struct i40e_hw *hw = &pf->hw;
541 	struct i40e_eth_stats *oes;
542 	struct i40e_eth_stats *es;     /* device's eth stats */
543 
544 	es = &vsi->eth_stats;
545 	oes = &vsi->eth_stats_offsets;
546 
547 	/* Gather up the stats that the hw collects */
548 	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
549 			   vsi->stat_offsets_loaded,
550 			   &oes->tx_errors, &es->tx_errors);
551 	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
552 			   vsi->stat_offsets_loaded,
553 			   &oes->rx_discards, &es->rx_discards);
554 	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
555 			   vsi->stat_offsets_loaded,
556 			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
557 	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
558 			   vsi->stat_offsets_loaded,
559 			   &oes->tx_errors, &es->tx_errors);
560 
561 	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
562 			   I40E_GLV_GORCL(stat_idx),
563 			   vsi->stat_offsets_loaded,
564 			   &oes->rx_bytes, &es->rx_bytes);
565 	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
566 			   I40E_GLV_UPRCL(stat_idx),
567 			   vsi->stat_offsets_loaded,
568 			   &oes->rx_unicast, &es->rx_unicast);
569 	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
570 			   I40E_GLV_MPRCL(stat_idx),
571 			   vsi->stat_offsets_loaded,
572 			   &oes->rx_multicast, &es->rx_multicast);
573 	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
574 			   I40E_GLV_BPRCL(stat_idx),
575 			   vsi->stat_offsets_loaded,
576 			   &oes->rx_broadcast, &es->rx_broadcast);
577 
578 	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
579 			   I40E_GLV_GOTCL(stat_idx),
580 			   vsi->stat_offsets_loaded,
581 			   &oes->tx_bytes, &es->tx_bytes);
582 	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
583 			   I40E_GLV_UPTCL(stat_idx),
584 			   vsi->stat_offsets_loaded,
585 			   &oes->tx_unicast, &es->tx_unicast);
586 	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
587 			   I40E_GLV_MPTCL(stat_idx),
588 			   vsi->stat_offsets_loaded,
589 			   &oes->tx_multicast, &es->tx_multicast);
590 	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
591 			   I40E_GLV_BPTCL(stat_idx),
592 			   vsi->stat_offsets_loaded,
593 			   &oes->tx_broadcast, &es->tx_broadcast);
594 	vsi->stat_offsets_loaded = true;
595 }
596 
597 /**
598  * i40e_update_veb_stats - Update Switch component statistics
599  * @veb: the VEB being updated
600  **/
601 static void i40e_update_veb_stats(struct i40e_veb *veb)
602 {
603 	struct i40e_pf *pf = veb->pf;
604 	struct i40e_hw *hw = &pf->hw;
605 	struct i40e_eth_stats *oes;
606 	struct i40e_eth_stats *es;     /* device's eth stats */
607 	int idx = 0;
608 
609 	idx = veb->stats_idx;
610 	es = &veb->stats;
611 	oes = &veb->stats_offsets;
612 
613 	/* Gather up the stats that the hw collects */
614 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
615 			   veb->stat_offsets_loaded,
616 			   &oes->tx_discards, &es->tx_discards);
617 	if (hw->revision_id > 0)
618 		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
619 				   veb->stat_offsets_loaded,
620 				   &oes->rx_unknown_protocol,
621 				   &es->rx_unknown_protocol);
622 	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
623 			   veb->stat_offsets_loaded,
624 			   &oes->rx_bytes, &es->rx_bytes);
625 	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
626 			   veb->stat_offsets_loaded,
627 			   &oes->rx_unicast, &es->rx_unicast);
628 	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
629 			   veb->stat_offsets_loaded,
630 			   &oes->rx_multicast, &es->rx_multicast);
631 	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
632 			   veb->stat_offsets_loaded,
633 			   &oes->rx_broadcast, &es->rx_broadcast);
634 
635 	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
636 			   veb->stat_offsets_loaded,
637 			   &oes->tx_bytes, &es->tx_bytes);
638 	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
639 			   veb->stat_offsets_loaded,
640 			   &oes->tx_unicast, &es->tx_unicast);
641 	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
642 			   veb->stat_offsets_loaded,
643 			   &oes->tx_multicast, &es->tx_multicast);
644 	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
645 			   veb->stat_offsets_loaded,
646 			   &oes->tx_broadcast, &es->tx_broadcast);
647 	veb->stat_offsets_loaded = true;
648 }
649 
650 #ifdef I40E_FCOE
651 /**
652  * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
653  * @vsi: the VSI that is capable of doing FCoE
654  **/
655 static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
656 {
657 	struct i40e_pf *pf = vsi->back;
658 	struct i40e_hw *hw = &pf->hw;
659 	struct i40e_fcoe_stats *ofs;
660 	struct i40e_fcoe_stats *fs;     /* device's eth stats */
661 	int idx;
662 
663 	if (vsi->type != I40E_VSI_FCOE)
664 		return;
665 
666 	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
667 	fs = &vsi->fcoe_stats;
668 	ofs = &vsi->fcoe_stats_offsets;
669 
670 	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
671 			   vsi->fcoe_stat_offsets_loaded,
672 			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
673 	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
674 			   vsi->fcoe_stat_offsets_loaded,
675 			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
676 	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
677 			   vsi->fcoe_stat_offsets_loaded,
678 			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
679 	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
680 			   vsi->fcoe_stat_offsets_loaded,
681 			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
682 	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
683 			   vsi->fcoe_stat_offsets_loaded,
684 			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
685 	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
686 			   vsi->fcoe_stat_offsets_loaded,
687 			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
688 	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
689 			   vsi->fcoe_stat_offsets_loaded,
690 			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
691 	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
692 			   vsi->fcoe_stat_offsets_loaded,
693 			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
694 
695 	vsi->fcoe_stat_offsets_loaded = true;
696 }
697 
698 #endif
699 /**
700  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
701  * @pf: the corresponding PF
702  *
703  * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
704  **/
705 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
706 {
707 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
708 	struct i40e_hw_port_stats *nsd = &pf->stats;
709 	struct i40e_hw *hw = &pf->hw;
710 	u64 xoff = 0;
711 	u16 i, v;
712 
713 	if ((hw->fc.current_mode != I40E_FC_FULL) &&
714 	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
715 		return;
716 
717 	xoff = nsd->link_xoff_rx;
718 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
719 			   pf->stat_offsets_loaded,
720 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
721 
722 	/* No new LFC xoff rx */
723 	if (!(nsd->link_xoff_rx - xoff))
724 		return;
725 
726 	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
727 	for (v = 0; v < pf->num_alloc_vsi; v++) {
728 		struct i40e_vsi *vsi = pf->vsi[v];
729 
730 		if (!vsi || !vsi->tx_rings[0])
731 			continue;
732 
733 		for (i = 0; i < vsi->num_queue_pairs; i++) {
734 			struct i40e_ring *ring = vsi->tx_rings[i];
735 			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
736 		}
737 	}
738 }
739 
740 /**
741  * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
742  * @pf: the corresponding PF
743  *
744  * Update the Rx XOFF counter (PAUSE frames) in PFC mode
745  **/
746 static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
747 {
748 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
749 	struct i40e_hw_port_stats *nsd = &pf->stats;
750 	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
751 	struct i40e_dcbx_config *dcb_cfg;
752 	struct i40e_hw *hw = &pf->hw;
753 	u16 i, v;
754 	u8 tc;
755 
756 	dcb_cfg = &hw->local_dcbx_config;
757 
758 	/* See if DCB enabled with PFC TC */
759 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
760 	    !(dcb_cfg->pfc.pfcenable)) {
761 		i40e_update_link_xoff_rx(pf);
762 		return;
763 	}
764 
765 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
766 		u64 prio_xoff = nsd->priority_xoff_rx[i];
767 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
768 				   pf->stat_offsets_loaded,
769 				   &osd->priority_xoff_rx[i],
770 				   &nsd->priority_xoff_rx[i]);
771 
772 		/* No new PFC xoff rx */
773 		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
774 			continue;
775 		/* Get the TC for given priority */
776 		tc = dcb_cfg->etscfg.prioritytable[i];
777 		xoff[tc] = true;
778 	}
779 
780 	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
781 	for (v = 0; v < pf->num_alloc_vsi; v++) {
782 		struct i40e_vsi *vsi = pf->vsi[v];
783 
784 		if (!vsi || !vsi->tx_rings[0])
785 			continue;
786 
787 		for (i = 0; i < vsi->num_queue_pairs; i++) {
788 			struct i40e_ring *ring = vsi->tx_rings[i];
789 
790 			tc = ring->dcb_tc;
791 			if (xoff[tc])
792 				clear_bit(__I40E_HANG_CHECK_ARMED,
793 					  &ring->state);
794 		}
795 	}
796 }
797 
798 /**
799  * i40e_update_vsi_stats - Update the vsi statistics counters.
800  * @vsi: the VSI to be updated
801  *
802  * There are a few instances where we store the same stat in a
803  * couple of different structs.  This is partly because we have
804  * the netdev stats that need to be filled out, which is slightly
805  * different from the "eth_stats" defined by the chip and used in
806  * VF communications.  We sort it out here.
807  **/
808 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
809 {
810 	struct i40e_pf *pf = vsi->back;
811 	struct rtnl_link_stats64 *ons;
812 	struct rtnl_link_stats64 *ns;   /* netdev stats */
813 	struct i40e_eth_stats *oes;
814 	struct i40e_eth_stats *es;     /* device's eth stats */
815 	u32 tx_restart, tx_busy;
816 	struct i40e_ring *p;
817 	u32 rx_page, rx_buf;
818 	u64 bytes, packets;
819 	unsigned int start;
820 	u64 rx_p, rx_b;
821 	u64 tx_p, tx_b;
822 	u16 q;
823 
824 	if (test_bit(__I40E_DOWN, &vsi->state) ||
825 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
826 		return;
827 
828 	ns = i40e_get_vsi_stats_struct(vsi);
829 	ons = &vsi->net_stats_offsets;
830 	es = &vsi->eth_stats;
831 	oes = &vsi->eth_stats_offsets;
832 
833 	/* Gather up the netdev and vsi stats that the driver collects
834 	 * on the fly during packet processing
835 	 */
836 	rx_b = rx_p = 0;
837 	tx_b = tx_p = 0;
838 	tx_restart = tx_busy = 0;
839 	rx_page = 0;
840 	rx_buf = 0;
841 	rcu_read_lock();
842 	for (q = 0; q < vsi->num_queue_pairs; q++) {
843 		/* locate Tx ring */
844 		p = ACCESS_ONCE(vsi->tx_rings[q]);
845 
846 		do {
847 			start = u64_stats_fetch_begin_irq(&p->syncp);
848 			packets = p->stats.packets;
849 			bytes = p->stats.bytes;
850 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
851 		tx_b += bytes;
852 		tx_p += packets;
853 		tx_restart += p->tx_stats.restart_queue;
854 		tx_busy += p->tx_stats.tx_busy;
855 
856 		/* Rx queue is part of the same block as Tx queue */
857 		p = &p[1];
858 		do {
859 			start = u64_stats_fetch_begin_irq(&p->syncp);
860 			packets = p->stats.packets;
861 			bytes = p->stats.bytes;
862 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
863 		rx_b += bytes;
864 		rx_p += packets;
865 		rx_buf += p->rx_stats.alloc_buff_failed;
866 		rx_page += p->rx_stats.alloc_page_failed;
867 	}
868 	rcu_read_unlock();
869 	vsi->tx_restart = tx_restart;
870 	vsi->tx_busy = tx_busy;
871 	vsi->rx_page_failed = rx_page;
872 	vsi->rx_buf_failed = rx_buf;
873 
874 	ns->rx_packets = rx_p;
875 	ns->rx_bytes = rx_b;
876 	ns->tx_packets = tx_p;
877 	ns->tx_bytes = tx_b;
878 
879 	/* update netdev stats from eth stats */
880 	i40e_update_eth_stats(vsi);
881 	ons->tx_errors = oes->tx_errors;
882 	ns->tx_errors = es->tx_errors;
883 	ons->multicast = oes->rx_multicast;
884 	ns->multicast = es->rx_multicast;
885 	ons->rx_dropped = oes->rx_discards;
886 	ns->rx_dropped = es->rx_discards;
887 	ons->tx_dropped = oes->tx_discards;
888 	ns->tx_dropped = es->tx_discards;
889 
890 	/* pull in a couple PF stats if this is the main vsi */
891 	if (vsi == pf->vsi[pf->lan_vsi]) {
892 		ns->rx_crc_errors = pf->stats.crc_errors;
893 		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
894 		ns->rx_length_errors = pf->stats.rx_length_errors;
895 	}
896 }
897 
898 /**
899  * i40e_update_pf_stats - Update the pf statistics counters.
900  * @pf: the PF to be updated
901  **/
902 static void i40e_update_pf_stats(struct i40e_pf *pf)
903 {
904 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
905 	struct i40e_hw_port_stats *nsd = &pf->stats;
906 	struct i40e_hw *hw = &pf->hw;
907 	u32 val;
908 	int i;
909 
910 	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
911 			   I40E_GLPRT_GORCL(hw->port),
912 			   pf->stat_offsets_loaded,
913 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
914 	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
915 			   I40E_GLPRT_GOTCL(hw->port),
916 			   pf->stat_offsets_loaded,
917 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
918 	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
919 			   pf->stat_offsets_loaded,
920 			   &osd->eth.rx_discards,
921 			   &nsd->eth.rx_discards);
922 	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
923 			   pf->stat_offsets_loaded,
924 			   &osd->eth.tx_discards,
925 			   &nsd->eth.tx_discards);
926 
927 	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
928 			   I40E_GLPRT_UPRCL(hw->port),
929 			   pf->stat_offsets_loaded,
930 			   &osd->eth.rx_unicast,
931 			   &nsd->eth.rx_unicast);
932 	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
933 			   I40E_GLPRT_MPRCL(hw->port),
934 			   pf->stat_offsets_loaded,
935 			   &osd->eth.rx_multicast,
936 			   &nsd->eth.rx_multicast);
937 	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
938 			   I40E_GLPRT_BPRCL(hw->port),
939 			   pf->stat_offsets_loaded,
940 			   &osd->eth.rx_broadcast,
941 			   &nsd->eth.rx_broadcast);
942 	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
943 			   I40E_GLPRT_UPTCL(hw->port),
944 			   pf->stat_offsets_loaded,
945 			   &osd->eth.tx_unicast,
946 			   &nsd->eth.tx_unicast);
947 	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
948 			   I40E_GLPRT_MPTCL(hw->port),
949 			   pf->stat_offsets_loaded,
950 			   &osd->eth.tx_multicast,
951 			   &nsd->eth.tx_multicast);
952 	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
953 			   I40E_GLPRT_BPTCL(hw->port),
954 			   pf->stat_offsets_loaded,
955 			   &osd->eth.tx_broadcast,
956 			   &nsd->eth.tx_broadcast);
957 
958 	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
959 			   pf->stat_offsets_loaded,
960 			   &osd->tx_dropped_link_down,
961 			   &nsd->tx_dropped_link_down);
962 
963 	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
964 			   pf->stat_offsets_loaded,
965 			   &osd->crc_errors, &nsd->crc_errors);
966 
967 	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
968 			   pf->stat_offsets_loaded,
969 			   &osd->illegal_bytes, &nsd->illegal_bytes);
970 
971 	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
972 			   pf->stat_offsets_loaded,
973 			   &osd->mac_local_faults,
974 			   &nsd->mac_local_faults);
975 	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
976 			   pf->stat_offsets_loaded,
977 			   &osd->mac_remote_faults,
978 			   &nsd->mac_remote_faults);
979 
980 	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
981 			   pf->stat_offsets_loaded,
982 			   &osd->rx_length_errors,
983 			   &nsd->rx_length_errors);
984 
985 	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
986 			   pf->stat_offsets_loaded,
987 			   &osd->link_xon_rx, &nsd->link_xon_rx);
988 	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
989 			   pf->stat_offsets_loaded,
990 			   &osd->link_xon_tx, &nsd->link_xon_tx);
991 	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
992 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
993 			   pf->stat_offsets_loaded,
994 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
995 
996 	for (i = 0; i < 8; i++) {
997 		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
998 				   pf->stat_offsets_loaded,
999 				   &osd->priority_xon_rx[i],
1000 				   &nsd->priority_xon_rx[i]);
1001 		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1002 				   pf->stat_offsets_loaded,
1003 				   &osd->priority_xon_tx[i],
1004 				   &nsd->priority_xon_tx[i]);
1005 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1006 				   pf->stat_offsets_loaded,
1007 				   &osd->priority_xoff_tx[i],
1008 				   &nsd->priority_xoff_tx[i]);
1009 		i40e_stat_update32(hw,
1010 				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1011 				   pf->stat_offsets_loaded,
1012 				   &osd->priority_xon_2_xoff[i],
1013 				   &nsd->priority_xon_2_xoff[i]);
1014 	}
1015 
1016 	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1017 			   I40E_GLPRT_PRC64L(hw->port),
1018 			   pf->stat_offsets_loaded,
1019 			   &osd->rx_size_64, &nsd->rx_size_64);
1020 	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1021 			   I40E_GLPRT_PRC127L(hw->port),
1022 			   pf->stat_offsets_loaded,
1023 			   &osd->rx_size_127, &nsd->rx_size_127);
1024 	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1025 			   I40E_GLPRT_PRC255L(hw->port),
1026 			   pf->stat_offsets_loaded,
1027 			   &osd->rx_size_255, &nsd->rx_size_255);
1028 	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1029 			   I40E_GLPRT_PRC511L(hw->port),
1030 			   pf->stat_offsets_loaded,
1031 			   &osd->rx_size_511, &nsd->rx_size_511);
1032 	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1033 			   I40E_GLPRT_PRC1023L(hw->port),
1034 			   pf->stat_offsets_loaded,
1035 			   &osd->rx_size_1023, &nsd->rx_size_1023);
1036 	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1037 			   I40E_GLPRT_PRC1522L(hw->port),
1038 			   pf->stat_offsets_loaded,
1039 			   &osd->rx_size_1522, &nsd->rx_size_1522);
1040 	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1041 			   I40E_GLPRT_PRC9522L(hw->port),
1042 			   pf->stat_offsets_loaded,
1043 			   &osd->rx_size_big, &nsd->rx_size_big);
1044 
1045 	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1046 			   I40E_GLPRT_PTC64L(hw->port),
1047 			   pf->stat_offsets_loaded,
1048 			   &osd->tx_size_64, &nsd->tx_size_64);
1049 	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1050 			   I40E_GLPRT_PTC127L(hw->port),
1051 			   pf->stat_offsets_loaded,
1052 			   &osd->tx_size_127, &nsd->tx_size_127);
1053 	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1054 			   I40E_GLPRT_PTC255L(hw->port),
1055 			   pf->stat_offsets_loaded,
1056 			   &osd->tx_size_255, &nsd->tx_size_255);
1057 	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1058 			   I40E_GLPRT_PTC511L(hw->port),
1059 			   pf->stat_offsets_loaded,
1060 			   &osd->tx_size_511, &nsd->tx_size_511);
1061 	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1062 			   I40E_GLPRT_PTC1023L(hw->port),
1063 			   pf->stat_offsets_loaded,
1064 			   &osd->tx_size_1023, &nsd->tx_size_1023);
1065 	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1066 			   I40E_GLPRT_PTC1522L(hw->port),
1067 			   pf->stat_offsets_loaded,
1068 			   &osd->tx_size_1522, &nsd->tx_size_1522);
1069 	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1070 			   I40E_GLPRT_PTC9522L(hw->port),
1071 			   pf->stat_offsets_loaded,
1072 			   &osd->tx_size_big, &nsd->tx_size_big);
1073 
1074 	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1075 			   pf->stat_offsets_loaded,
1076 			   &osd->rx_undersize, &nsd->rx_undersize);
1077 	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1078 			   pf->stat_offsets_loaded,
1079 			   &osd->rx_fragments, &nsd->rx_fragments);
1080 	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1081 			   pf->stat_offsets_loaded,
1082 			   &osd->rx_oversize, &nsd->rx_oversize);
1083 	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1084 			   pf->stat_offsets_loaded,
1085 			   &osd->rx_jabber, &nsd->rx_jabber);
1086 
1087 	/* FDIR stats */
1088 	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
1089 			   pf->stat_offsets_loaded,
1090 			   &osd->fd_atr_match, &nsd->fd_atr_match);
1091 	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
1092 			   pf->stat_offsets_loaded,
1093 			   &osd->fd_sb_match, &nsd->fd_sb_match);
1094 
1095 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
1096 	nsd->tx_lpi_status =
1097 		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1098 			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1099 	nsd->rx_lpi_status =
1100 		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1101 			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1102 	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1103 			   pf->stat_offsets_loaded,
1104 			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
1105 	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1106 			   pf->stat_offsets_loaded,
1107 			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
1108 
1109 	pf->stat_offsets_loaded = true;
1110 }
1111 
1112 /**
1113  * i40e_update_stats - Update the various statistics counters.
1114  * @vsi: the VSI to be updated
1115  *
1116  * Update the various stats for this VSI and its related entities.
1117  **/
1118 void i40e_update_stats(struct i40e_vsi *vsi)
1119 {
1120 	struct i40e_pf *pf = vsi->back;
1121 
1122 	if (vsi == pf->vsi[pf->lan_vsi])
1123 		i40e_update_pf_stats(pf);
1124 
1125 	i40e_update_vsi_stats(vsi);
1126 #ifdef I40E_FCOE
1127 	i40e_update_fcoe_stats(vsi);
1128 #endif
1129 }
1130 
1131 /**
1132  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1133  * @vsi: the VSI to be searched
1134  * @macaddr: the MAC address
1135  * @vlan: the vlan
1136  * @is_vf: make sure its a vf filter, else doesn't matter
1137  * @is_netdev: make sure its a netdev filter, else doesn't matter
1138  *
1139  * Returns ptr to the filter object or NULL
1140  **/
1141 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1142 						u8 *macaddr, s16 vlan,
1143 						bool is_vf, bool is_netdev)
1144 {
1145 	struct i40e_mac_filter *f;
1146 
1147 	if (!vsi || !macaddr)
1148 		return NULL;
1149 
1150 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1151 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1152 		    (vlan == f->vlan)    &&
1153 		    (!is_vf || f->is_vf) &&
1154 		    (!is_netdev || f->is_netdev))
1155 			return f;
1156 	}
1157 	return NULL;
1158 }
1159 
1160 /**
1161  * i40e_find_mac - Find a mac addr in the macvlan filters list
1162  * @vsi: the VSI to be searched
1163  * @macaddr: the MAC address we are searching for
1164  * @is_vf: make sure its a vf filter, else doesn't matter
1165  * @is_netdev: make sure its a netdev filter, else doesn't matter
1166  *
1167  * Returns the first filter with the provided MAC address or NULL if
1168  * MAC address was not found
1169  **/
1170 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1171 				      bool is_vf, bool is_netdev)
1172 {
1173 	struct i40e_mac_filter *f;
1174 
1175 	if (!vsi || !macaddr)
1176 		return NULL;
1177 
1178 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1179 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1180 		    (!is_vf || f->is_vf) &&
1181 		    (!is_netdev || f->is_netdev))
1182 			return f;
1183 	}
1184 	return NULL;
1185 }
1186 
1187 /**
1188  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1189  * @vsi: the VSI to be searched
1190  *
1191  * Returns true if VSI is in vlan mode or false otherwise
1192  **/
1193 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1194 {
1195 	struct i40e_mac_filter *f;
1196 
1197 	/* Only -1 for all the filters denotes not in vlan mode
1198 	 * so we have to go through all the list in order to make sure
1199 	 */
1200 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1201 		if (f->vlan >= 0)
1202 			return true;
1203 	}
1204 
1205 	return false;
1206 }
1207 
1208 /**
1209  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1210  * @vsi: the VSI to be searched
1211  * @macaddr: the mac address to be filtered
1212  * @is_vf: true if it is a vf
1213  * @is_netdev: true if it is a netdev
1214  *
1215  * Goes through all the macvlan filters and adds a
1216  * macvlan filter for each unique vlan that already exists
1217  *
1218  * Returns first filter found on success, else NULL
1219  **/
1220 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1221 					     bool is_vf, bool is_netdev)
1222 {
1223 	struct i40e_mac_filter *f;
1224 
1225 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1226 		if (!i40e_find_filter(vsi, macaddr, f->vlan,
1227 				      is_vf, is_netdev)) {
1228 			if (!i40e_add_filter(vsi, macaddr, f->vlan,
1229 					     is_vf, is_netdev))
1230 				return NULL;
1231 		}
1232 	}
1233 
1234 	return list_first_entry_or_null(&vsi->mac_filter_list,
1235 					struct i40e_mac_filter, list);
1236 }
1237 
1238 /**
1239  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1240  * @vsi: the PF Main VSI - inappropriate for any other VSI
1241  * @macaddr: the MAC address
1242  *
1243  * Some older firmware configurations set up a default promiscuous VLAN
1244  * filter that needs to be removed.
1245  **/
1246 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1247 {
1248 	struct i40e_aqc_remove_macvlan_element_data element;
1249 	struct i40e_pf *pf = vsi->back;
1250 	i40e_status aq_ret;
1251 
1252 	/* Only appropriate for the PF main VSI */
1253 	if (vsi->type != I40E_VSI_MAIN)
1254 		return -EINVAL;
1255 
1256 	memset(&element, 0, sizeof(element));
1257 	ether_addr_copy(element.mac_addr, macaddr);
1258 	element.vlan_tag = 0;
1259 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1260 			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1261 	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1262 	if (aq_ret)
1263 		return -ENOENT;
1264 
1265 	return 0;
1266 }
1267 
1268 /**
1269  * i40e_add_filter - Add a mac/vlan filter to the VSI
1270  * @vsi: the VSI to be searched
1271  * @macaddr: the MAC address
1272  * @vlan: the vlan
1273  * @is_vf: make sure its a vf filter, else doesn't matter
1274  * @is_netdev: make sure its a netdev filter, else doesn't matter
1275  *
1276  * Returns ptr to the filter object or NULL when no memory available.
1277  **/
1278 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1279 					u8 *macaddr, s16 vlan,
1280 					bool is_vf, bool is_netdev)
1281 {
1282 	struct i40e_mac_filter *f;
1283 
1284 	if (!vsi || !macaddr)
1285 		return NULL;
1286 
1287 	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1288 	if (!f) {
1289 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
1290 		if (!f)
1291 			goto add_filter_out;
1292 
1293 		ether_addr_copy(f->macaddr, macaddr);
1294 		f->vlan = vlan;
1295 		f->changed = true;
1296 
1297 		INIT_LIST_HEAD(&f->list);
1298 		list_add(&f->list, &vsi->mac_filter_list);
1299 	}
1300 
1301 	/* increment counter and add a new flag if needed */
1302 	if (is_vf) {
1303 		if (!f->is_vf) {
1304 			f->is_vf = true;
1305 			f->counter++;
1306 		}
1307 	} else if (is_netdev) {
1308 		if (!f->is_netdev) {
1309 			f->is_netdev = true;
1310 			f->counter++;
1311 		}
1312 	} else {
1313 		f->counter++;
1314 	}
1315 
1316 	/* changed tells sync_filters_subtask to
1317 	 * push the filter down to the firmware
1318 	 */
1319 	if (f->changed) {
1320 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1321 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1322 	}
1323 
1324 add_filter_out:
1325 	return f;
1326 }
1327 
1328 /**
1329  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1330  * @vsi: the VSI to be searched
1331  * @macaddr: the MAC address
1332  * @vlan: the vlan
1333  * @is_vf: make sure it's a vf filter, else doesn't matter
1334  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1335  **/
1336 void i40e_del_filter(struct i40e_vsi *vsi,
1337 		     u8 *macaddr, s16 vlan,
1338 		     bool is_vf, bool is_netdev)
1339 {
1340 	struct i40e_mac_filter *f;
1341 
1342 	if (!vsi || !macaddr)
1343 		return;
1344 
1345 	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1346 	if (!f || f->counter == 0)
1347 		return;
1348 
1349 	if (is_vf) {
1350 		if (f->is_vf) {
1351 			f->is_vf = false;
1352 			f->counter--;
1353 		}
1354 	} else if (is_netdev) {
1355 		if (f->is_netdev) {
1356 			f->is_netdev = false;
1357 			f->counter--;
1358 		}
1359 	} else {
1360 		/* make sure we don't remove a filter in use by vf or netdev */
1361 		int min_f = 0;
1362 		min_f += (f->is_vf ? 1 : 0);
1363 		min_f += (f->is_netdev ? 1 : 0);
1364 
1365 		if (f->counter > min_f)
1366 			f->counter--;
1367 	}
1368 
1369 	/* counter == 0 tells sync_filters_subtask to
1370 	 * remove the filter from the firmware's list
1371 	 */
1372 	if (f->counter == 0) {
1373 		f->changed = true;
1374 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1375 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1376 	}
1377 }
1378 
1379 /**
1380  * i40e_set_mac - NDO callback to set mac address
1381  * @netdev: network interface device structure
1382  * @p: pointer to an address structure
1383  *
1384  * Returns 0 on success, negative on failure
1385  **/
1386 #ifdef I40E_FCOE
1387 int i40e_set_mac(struct net_device *netdev, void *p)
1388 #else
1389 static int i40e_set_mac(struct net_device *netdev, void *p)
1390 #endif
1391 {
1392 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1393 	struct i40e_vsi *vsi = np->vsi;
1394 	struct i40e_pf *pf = vsi->back;
1395 	struct i40e_hw *hw = &pf->hw;
1396 	struct sockaddr *addr = p;
1397 	struct i40e_mac_filter *f;
1398 
1399 	if (!is_valid_ether_addr(addr->sa_data))
1400 		return -EADDRNOTAVAIL;
1401 
1402 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1403 		netdev_info(netdev, "already using mac address %pM\n",
1404 			    addr->sa_data);
1405 		return 0;
1406 	}
1407 
1408 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1409 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1410 		return -EADDRNOTAVAIL;
1411 
1412 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1413 		netdev_info(netdev, "returning to hw mac address %pM\n",
1414 			    hw->mac.addr);
1415 	else
1416 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1417 
1418 	if (vsi->type == I40E_VSI_MAIN) {
1419 		i40e_status ret;
1420 		ret = i40e_aq_mac_address_write(&vsi->back->hw,
1421 						I40E_AQC_WRITE_TYPE_LAA_WOL,
1422 						addr->sa_data, NULL);
1423 		if (ret) {
1424 			netdev_info(netdev,
1425 				    "Addr change for Main VSI failed: %d\n",
1426 				    ret);
1427 			return -EADDRNOTAVAIL;
1428 		}
1429 	}
1430 
1431 	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1432 		struct i40e_aqc_remove_macvlan_element_data element;
1433 
1434 		memset(&element, 0, sizeof(element));
1435 		ether_addr_copy(element.mac_addr, netdev->dev_addr);
1436 		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1437 		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1438 	} else {
1439 		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1440 				false, false);
1441 	}
1442 
1443 	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1444 		struct i40e_aqc_add_macvlan_element_data element;
1445 
1446 		memset(&element, 0, sizeof(element));
1447 		ether_addr_copy(element.mac_addr, hw->mac.addr);
1448 		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1449 		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1450 	} else {
1451 		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1452 				    false, false);
1453 		if (f)
1454 			f->is_laa = true;
1455 	}
1456 
1457 	i40e_sync_vsi_filters(vsi);
1458 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
1459 
1460 	return 0;
1461 }
1462 
1463 /**
1464  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1465  * @vsi: the VSI being setup
1466  * @ctxt: VSI context structure
1467  * @enabled_tc: Enabled TCs bitmap
1468  * @is_add: True if called before Add VSI
1469  *
1470  * Setup VSI queue mapping for enabled traffic classes.
1471  **/
1472 #ifdef I40E_FCOE
1473 void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1474 			      struct i40e_vsi_context *ctxt,
1475 			      u8 enabled_tc,
1476 			      bool is_add)
1477 #else
1478 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1479 				     struct i40e_vsi_context *ctxt,
1480 				     u8 enabled_tc,
1481 				     bool is_add)
1482 #endif
1483 {
1484 	struct i40e_pf *pf = vsi->back;
1485 	u16 sections = 0;
1486 	u8 netdev_tc = 0;
1487 	u16 numtc = 0;
1488 	u16 qcount;
1489 	u8 offset;
1490 	u16 qmap;
1491 	int i;
1492 	u16 num_tc_qps = 0;
1493 
1494 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1495 	offset = 0;
1496 
1497 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1498 		/* Find numtc from enabled TC bitmap */
1499 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1500 			if (enabled_tc & (1 << i)) /* TC is enabled */
1501 				numtc++;
1502 		}
1503 		if (!numtc) {
1504 			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1505 			numtc = 1;
1506 		}
1507 	} else {
1508 		/* At least TC0 is enabled in case of non-DCB case */
1509 		numtc = 1;
1510 	}
1511 
1512 	vsi->tc_config.numtc = numtc;
1513 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1514 	/* Number of queues per enabled TC */
1515 	/* In MFP case we can have a much lower count of MSIx
1516 	 * vectors available and so we need to lower the used
1517 	 * q count.
1518 	 */
1519 	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1520 	num_tc_qps = qcount / numtc;
1521 	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1522 
1523 	/* Setup queue offset/count for all TCs for given VSI */
1524 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1525 		/* See if the given TC is enabled for the given VSI */
1526 		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
1527 			int pow, num_qps;
1528 
1529 			switch (vsi->type) {
1530 			case I40E_VSI_MAIN:
1531 				qcount = min_t(int, pf->rss_size, num_tc_qps);
1532 				break;
1533 #ifdef I40E_FCOE
1534 			case I40E_VSI_FCOE:
1535 				qcount = num_tc_qps;
1536 				break;
1537 #endif
1538 			case I40E_VSI_FDIR:
1539 			case I40E_VSI_SRIOV:
1540 			case I40E_VSI_VMDQ2:
1541 			default:
1542 				qcount = num_tc_qps;
1543 				WARN_ON(i != 0);
1544 				break;
1545 			}
1546 			vsi->tc_config.tc_info[i].qoffset = offset;
1547 			vsi->tc_config.tc_info[i].qcount = qcount;
1548 
1549 			/* find the power-of-2 of the number of queue pairs */
1550 			num_qps = qcount;
1551 			pow = 0;
1552 			while (num_qps && ((1 << pow) < qcount)) {
1553 				pow++;
1554 				num_qps >>= 1;
1555 			}
1556 
1557 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1558 			qmap =
1559 			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1560 			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1561 
1562 			offset += qcount;
1563 		} else {
1564 			/* TC is not enabled so set the offset to
1565 			 * default queue and allocate one queue
1566 			 * for the given TC.
1567 			 */
1568 			vsi->tc_config.tc_info[i].qoffset = 0;
1569 			vsi->tc_config.tc_info[i].qcount = 1;
1570 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1571 
1572 			qmap = 0;
1573 		}
1574 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1575 	}
1576 
1577 	/* Set actual Tx/Rx queue pairs */
1578 	vsi->num_queue_pairs = offset;
1579 
1580 	/* Scheduler section valid can only be set for ADD VSI */
1581 	if (is_add) {
1582 		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1583 
1584 		ctxt->info.up_enable_bits = enabled_tc;
1585 	}
1586 	if (vsi->type == I40E_VSI_SRIOV) {
1587 		ctxt->info.mapping_flags |=
1588 				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1589 		for (i = 0; i < vsi->num_queue_pairs; i++)
1590 			ctxt->info.queue_mapping[i] =
1591 					       cpu_to_le16(vsi->base_queue + i);
1592 	} else {
1593 		ctxt->info.mapping_flags |=
1594 					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1595 		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1596 	}
1597 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1598 }
1599 
1600 /**
1601  * i40e_set_rx_mode - NDO callback to set the netdev filters
1602  * @netdev: network interface device structure
1603  **/
1604 #ifdef I40E_FCOE
1605 void i40e_set_rx_mode(struct net_device *netdev)
1606 #else
1607 static void i40e_set_rx_mode(struct net_device *netdev)
1608 #endif
1609 {
1610 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1611 	struct i40e_mac_filter *f, *ftmp;
1612 	struct i40e_vsi *vsi = np->vsi;
1613 	struct netdev_hw_addr *uca;
1614 	struct netdev_hw_addr *mca;
1615 	struct netdev_hw_addr *ha;
1616 
1617 	/* add addr if not already in the filter list */
1618 	netdev_for_each_uc_addr(uca, netdev) {
1619 		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1620 			if (i40e_is_vsi_in_vlan(vsi))
1621 				i40e_put_mac_in_vlan(vsi, uca->addr,
1622 						     false, true);
1623 			else
1624 				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1625 						false, true);
1626 		}
1627 	}
1628 
1629 	netdev_for_each_mc_addr(mca, netdev) {
1630 		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1631 			if (i40e_is_vsi_in_vlan(vsi))
1632 				i40e_put_mac_in_vlan(vsi, mca->addr,
1633 						     false, true);
1634 			else
1635 				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1636 						false, true);
1637 		}
1638 	}
1639 
1640 	/* remove filter if not in netdev list */
1641 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1642 		bool found = false;
1643 
1644 		if (!f->is_netdev)
1645 			continue;
1646 
1647 		if (is_multicast_ether_addr(f->macaddr)) {
1648 			netdev_for_each_mc_addr(mca, netdev) {
1649 				if (ether_addr_equal(mca->addr, f->macaddr)) {
1650 					found = true;
1651 					break;
1652 				}
1653 			}
1654 		} else {
1655 			netdev_for_each_uc_addr(uca, netdev) {
1656 				if (ether_addr_equal(uca->addr, f->macaddr)) {
1657 					found = true;
1658 					break;
1659 				}
1660 			}
1661 
1662 			for_each_dev_addr(netdev, ha) {
1663 				if (ether_addr_equal(ha->addr, f->macaddr)) {
1664 					found = true;
1665 					break;
1666 				}
1667 			}
1668 		}
1669 		if (!found)
1670 			i40e_del_filter(
1671 			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1672 	}
1673 
1674 	/* check for other flag changes */
1675 	if (vsi->current_netdev_flags != vsi->netdev->flags) {
1676 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1677 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1678 	}
1679 }
1680 
1681 /**
1682  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1683  * @vsi: ptr to the VSI
1684  *
1685  * Push any outstanding VSI filter changes through the AdminQ.
1686  *
1687  * Returns 0 or error value
1688  **/
1689 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1690 {
1691 	struct i40e_mac_filter *f, *ftmp;
1692 	bool promisc_forced_on = false;
1693 	bool add_happened = false;
1694 	int filter_list_len = 0;
1695 	u32 changed_flags = 0;
1696 	i40e_status aq_ret = 0;
1697 	struct i40e_pf *pf;
1698 	int num_add = 0;
1699 	int num_del = 0;
1700 	u16 cmd_flags;
1701 
1702 	/* empty array typed pointers, kcalloc later */
1703 	struct i40e_aqc_add_macvlan_element_data *add_list;
1704 	struct i40e_aqc_remove_macvlan_element_data *del_list;
1705 
1706 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1707 		usleep_range(1000, 2000);
1708 	pf = vsi->back;
1709 
1710 	if (vsi->netdev) {
1711 		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1712 		vsi->current_netdev_flags = vsi->netdev->flags;
1713 	}
1714 
1715 	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1716 		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1717 
1718 		filter_list_len = pf->hw.aq.asq_buf_size /
1719 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
1720 		del_list = kcalloc(filter_list_len,
1721 			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
1722 			    GFP_KERNEL);
1723 		if (!del_list)
1724 			return -ENOMEM;
1725 
1726 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1727 			if (!f->changed)
1728 				continue;
1729 
1730 			if (f->counter != 0)
1731 				continue;
1732 			f->changed = false;
1733 			cmd_flags = 0;
1734 
1735 			/* add to delete list */
1736 			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1737 			del_list[num_del].vlan_tag =
1738 				cpu_to_le16((u16)(f->vlan ==
1739 					    I40E_VLAN_ANY ? 0 : f->vlan));
1740 
1741 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1742 			del_list[num_del].flags = cmd_flags;
1743 			num_del++;
1744 
1745 			/* unlink from filter list */
1746 			list_del(&f->list);
1747 			kfree(f);
1748 
1749 			/* flush a full buffer */
1750 			if (num_del == filter_list_len) {
1751 				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1752 					    vsi->seid, del_list, num_del,
1753 					    NULL);
1754 				num_del = 0;
1755 				memset(del_list, 0, sizeof(*del_list));
1756 
1757 				if (aq_ret &&
1758 				    pf->hw.aq.asq_last_status !=
1759 							      I40E_AQ_RC_ENOENT)
1760 					dev_info(&pf->pdev->dev,
1761 						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1762 						 aq_ret,
1763 						 pf->hw.aq.asq_last_status);
1764 			}
1765 		}
1766 		if (num_del) {
1767 			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1768 						     del_list, num_del, NULL);
1769 			num_del = 0;
1770 
1771 			if (aq_ret &&
1772 			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1773 				dev_info(&pf->pdev->dev,
1774 					 "ignoring delete macvlan error, err %d, aq_err %d\n",
1775 					 aq_ret, pf->hw.aq.asq_last_status);
1776 		}
1777 
1778 		kfree(del_list);
1779 		del_list = NULL;
1780 
1781 		/* do all the adds now */
1782 		filter_list_len = pf->hw.aq.asq_buf_size /
1783 			       sizeof(struct i40e_aqc_add_macvlan_element_data),
1784 		add_list = kcalloc(filter_list_len,
1785 			       sizeof(struct i40e_aqc_add_macvlan_element_data),
1786 			       GFP_KERNEL);
1787 		if (!add_list)
1788 			return -ENOMEM;
1789 
1790 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1791 			if (!f->changed)
1792 				continue;
1793 
1794 			if (f->counter == 0)
1795 				continue;
1796 			f->changed = false;
1797 			add_happened = true;
1798 			cmd_flags = 0;
1799 
1800 			/* add to add array */
1801 			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1802 			add_list[num_add].vlan_tag =
1803 				cpu_to_le16(
1804 				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1805 			add_list[num_add].queue_number = 0;
1806 
1807 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1808 			add_list[num_add].flags = cpu_to_le16(cmd_flags);
1809 			num_add++;
1810 
1811 			/* flush a full buffer */
1812 			if (num_add == filter_list_len) {
1813 				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1814 							     add_list, num_add,
1815 							     NULL);
1816 				num_add = 0;
1817 
1818 				if (aq_ret)
1819 					break;
1820 				memset(add_list, 0, sizeof(*add_list));
1821 			}
1822 		}
1823 		if (num_add) {
1824 			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1825 						     add_list, num_add, NULL);
1826 			num_add = 0;
1827 		}
1828 		kfree(add_list);
1829 		add_list = NULL;
1830 
1831 		if (add_happened && aq_ret &&
1832 		    pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
1833 			dev_info(&pf->pdev->dev,
1834 				 "add filter failed, err %d, aq_err %d\n",
1835 				 aq_ret, pf->hw.aq.asq_last_status);
1836 			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1837 			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1838 				      &vsi->state)) {
1839 				promisc_forced_on = true;
1840 				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1841 					&vsi->state);
1842 				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1843 			}
1844 		}
1845 	}
1846 
1847 	/* check for changes in promiscuous modes */
1848 	if (changed_flags & IFF_ALLMULTI) {
1849 		bool cur_multipromisc;
1850 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1851 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1852 							       vsi->seid,
1853 							       cur_multipromisc,
1854 							       NULL);
1855 		if (aq_ret)
1856 			dev_info(&pf->pdev->dev,
1857 				 "set multi promisc failed, err %d, aq_err %d\n",
1858 				 aq_ret, pf->hw.aq.asq_last_status);
1859 	}
1860 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1861 		bool cur_promisc;
1862 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1863 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1864 					&vsi->state));
1865 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1866 							     vsi->seid,
1867 							     cur_promisc, NULL);
1868 		if (aq_ret)
1869 			dev_info(&pf->pdev->dev,
1870 				 "set uni promisc failed, err %d, aq_err %d\n",
1871 				 aq_ret, pf->hw.aq.asq_last_status);
1872 		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1873 						   vsi->seid,
1874 						   cur_promisc, NULL);
1875 		if (aq_ret)
1876 			dev_info(&pf->pdev->dev,
1877 				 "set brdcast promisc failed, err %d, aq_err %d\n",
1878 				 aq_ret, pf->hw.aq.asq_last_status);
1879 	}
1880 
1881 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1882 	return 0;
1883 }
1884 
1885 /**
1886  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1887  * @pf: board private structure
1888  **/
1889 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1890 {
1891 	int v;
1892 
1893 	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1894 		return;
1895 	pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1896 
1897 	for (v = 0; v < pf->num_alloc_vsi; v++) {
1898 		if (pf->vsi[v] &&
1899 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1900 			i40e_sync_vsi_filters(pf->vsi[v]);
1901 	}
1902 }
1903 
1904 /**
1905  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1906  * @netdev: network interface device structure
1907  * @new_mtu: new value for maximum frame size
1908  *
1909  * Returns 0 on success, negative on failure
1910  **/
1911 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1912 {
1913 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1914 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1915 	struct i40e_vsi *vsi = np->vsi;
1916 
1917 	/* MTU < 68 is an error and causes problems on some kernels */
1918 	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1919 		return -EINVAL;
1920 
1921 	netdev_info(netdev, "changing MTU from %d to %d\n",
1922 		    netdev->mtu, new_mtu);
1923 	netdev->mtu = new_mtu;
1924 	if (netif_running(netdev))
1925 		i40e_vsi_reinit_locked(vsi);
1926 
1927 	return 0;
1928 }
1929 
1930 /**
1931  * i40e_ioctl - Access the hwtstamp interface
1932  * @netdev: network interface device structure
1933  * @ifr: interface request data
1934  * @cmd: ioctl command
1935  **/
1936 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1937 {
1938 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1939 	struct i40e_pf *pf = np->vsi->back;
1940 
1941 	switch (cmd) {
1942 	case SIOCGHWTSTAMP:
1943 		return i40e_ptp_get_ts_config(pf, ifr);
1944 	case SIOCSHWTSTAMP:
1945 		return i40e_ptp_set_ts_config(pf, ifr);
1946 	default:
1947 		return -EOPNOTSUPP;
1948 	}
1949 }
1950 
1951 /**
1952  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1953  * @vsi: the vsi being adjusted
1954  **/
1955 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1956 {
1957 	struct i40e_vsi_context ctxt;
1958 	i40e_status ret;
1959 
1960 	if ((vsi->info.valid_sections &
1961 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1962 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1963 		return;  /* already enabled */
1964 
1965 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1966 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1967 				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1968 
1969 	ctxt.seid = vsi->seid;
1970 	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1971 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1972 	if (ret) {
1973 		dev_info(&vsi->back->pdev->dev,
1974 			 "%s: update vsi failed, aq_err=%d\n",
1975 			 __func__, vsi->back->hw.aq.asq_last_status);
1976 	}
1977 }
1978 
1979 /**
1980  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1981  * @vsi: the vsi being adjusted
1982  **/
1983 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1984 {
1985 	struct i40e_vsi_context ctxt;
1986 	i40e_status ret;
1987 
1988 	if ((vsi->info.valid_sections &
1989 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1990 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1991 	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
1992 		return;  /* already disabled */
1993 
1994 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1995 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1996 				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1997 
1998 	ctxt.seid = vsi->seid;
1999 	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2000 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2001 	if (ret) {
2002 		dev_info(&vsi->back->pdev->dev,
2003 			 "%s: update vsi failed, aq_err=%d\n",
2004 			 __func__, vsi->back->hw.aq.asq_last_status);
2005 	}
2006 }
2007 
2008 /**
2009  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2010  * @netdev: network interface to be adjusted
2011  * @features: netdev features to test if VLAN offload is enabled or not
2012  **/
2013 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2014 {
2015 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2016 	struct i40e_vsi *vsi = np->vsi;
2017 
2018 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2019 		i40e_vlan_stripping_enable(vsi);
2020 	else
2021 		i40e_vlan_stripping_disable(vsi);
2022 }
2023 
2024 /**
2025  * i40e_vsi_add_vlan - Add vsi membership for given vlan
2026  * @vsi: the vsi being configured
2027  * @vid: vlan id to be added (0 = untagged only , -1 = any)
2028  **/
2029 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2030 {
2031 	struct i40e_mac_filter *f, *add_f;
2032 	bool is_netdev, is_vf;
2033 
2034 	is_vf = (vsi->type == I40E_VSI_SRIOV);
2035 	is_netdev = !!(vsi->netdev);
2036 
2037 	if (is_netdev) {
2038 		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2039 					is_vf, is_netdev);
2040 		if (!add_f) {
2041 			dev_info(&vsi->back->pdev->dev,
2042 				 "Could not add vlan filter %d for %pM\n",
2043 				 vid, vsi->netdev->dev_addr);
2044 			return -ENOMEM;
2045 		}
2046 	}
2047 
2048 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
2049 		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2050 		if (!add_f) {
2051 			dev_info(&vsi->back->pdev->dev,
2052 				 "Could not add vlan filter %d for %pM\n",
2053 				 vid, f->macaddr);
2054 			return -ENOMEM;
2055 		}
2056 	}
2057 
2058 	/* Now if we add a vlan tag, make sure to check if it is the first
2059 	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
2060 	 * with 0, so we now accept untagged and specified tagged traffic
2061 	 * (and not any taged and untagged)
2062 	 */
2063 	if (vid > 0) {
2064 		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2065 						  I40E_VLAN_ANY,
2066 						  is_vf, is_netdev)) {
2067 			i40e_del_filter(vsi, vsi->netdev->dev_addr,
2068 					I40E_VLAN_ANY, is_vf, is_netdev);
2069 			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2070 						is_vf, is_netdev);
2071 			if (!add_f) {
2072 				dev_info(&vsi->back->pdev->dev,
2073 					 "Could not add filter 0 for %pM\n",
2074 					 vsi->netdev->dev_addr);
2075 				return -ENOMEM;
2076 			}
2077 		}
2078 	}
2079 
2080 	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2081 	if (vid > 0 && !vsi->info.pvid) {
2082 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
2083 			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2084 					     is_vf, is_netdev)) {
2085 				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2086 						is_vf, is_netdev);
2087 				add_f = i40e_add_filter(vsi, f->macaddr,
2088 							0, is_vf, is_netdev);
2089 				if (!add_f) {
2090 					dev_info(&vsi->back->pdev->dev,
2091 						 "Could not add filter 0 for %pM\n",
2092 						 f->macaddr);
2093 					return -ENOMEM;
2094 				}
2095 			}
2096 		}
2097 	}
2098 
2099 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2100 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2101 		return 0;
2102 
2103 	return i40e_sync_vsi_filters(vsi);
2104 }
2105 
2106 /**
2107  * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2108  * @vsi: the vsi being configured
2109  * @vid: vlan id to be removed (0 = untagged only , -1 = any)
2110  *
2111  * Return: 0 on success or negative otherwise
2112  **/
2113 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2114 {
2115 	struct net_device *netdev = vsi->netdev;
2116 	struct i40e_mac_filter *f, *add_f;
2117 	bool is_vf, is_netdev;
2118 	int filter_count = 0;
2119 
2120 	is_vf = (vsi->type == I40E_VSI_SRIOV);
2121 	is_netdev = !!(netdev);
2122 
2123 	if (is_netdev)
2124 		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2125 
2126 	list_for_each_entry(f, &vsi->mac_filter_list, list)
2127 		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2128 
2129 	/* go through all the filters for this VSI and if there is only
2130 	 * vid == 0 it means there are no other filters, so vid 0 must
2131 	 * be replaced with -1. This signifies that we should from now
2132 	 * on accept any traffic (with any tag present, or untagged)
2133 	 */
2134 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
2135 		if (is_netdev) {
2136 			if (f->vlan &&
2137 			    ether_addr_equal(netdev->dev_addr, f->macaddr))
2138 				filter_count++;
2139 		}
2140 
2141 		if (f->vlan)
2142 			filter_count++;
2143 	}
2144 
2145 	if (!filter_count && is_netdev) {
2146 		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2147 		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2148 				    is_vf, is_netdev);
2149 		if (!f) {
2150 			dev_info(&vsi->back->pdev->dev,
2151 				 "Could not add filter %d for %pM\n",
2152 				 I40E_VLAN_ANY, netdev->dev_addr);
2153 			return -ENOMEM;
2154 		}
2155 	}
2156 
2157 	if (!filter_count) {
2158 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
2159 			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2160 			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2161 					    is_vf, is_netdev);
2162 			if (!add_f) {
2163 				dev_info(&vsi->back->pdev->dev,
2164 					 "Could not add filter %d for %pM\n",
2165 					 I40E_VLAN_ANY, f->macaddr);
2166 				return -ENOMEM;
2167 			}
2168 		}
2169 	}
2170 
2171 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2172 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2173 		return 0;
2174 
2175 	return i40e_sync_vsi_filters(vsi);
2176 }
2177 
2178 /**
2179  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2180  * @netdev: network interface to be adjusted
2181  * @vid: vlan id to be added
2182  *
2183  * net_device_ops implementation for adding vlan ids
2184  **/
2185 #ifdef I40E_FCOE
2186 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2187 			 __always_unused __be16 proto, u16 vid)
2188 #else
2189 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2190 				__always_unused __be16 proto, u16 vid)
2191 #endif
2192 {
2193 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2194 	struct i40e_vsi *vsi = np->vsi;
2195 	int ret = 0;
2196 
2197 	if (vid > 4095)
2198 		return -EINVAL;
2199 
2200 	netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2201 
2202 	/* If the network stack called us with vid = 0 then
2203 	 * it is asking to receive priority tagged packets with
2204 	 * vlan id 0.  Our HW receives them by default when configured
2205 	 * to receive untagged packets so there is no need to add an
2206 	 * extra filter for vlan 0 tagged packets.
2207 	 */
2208 	if (vid)
2209 		ret = i40e_vsi_add_vlan(vsi, vid);
2210 
2211 	if (!ret && (vid < VLAN_N_VID))
2212 		set_bit(vid, vsi->active_vlans);
2213 
2214 	return ret;
2215 }
2216 
2217 /**
2218  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2219  * @netdev: network interface to be adjusted
2220  * @vid: vlan id to be removed
2221  *
2222  * net_device_ops implementation for removing vlan ids
2223  **/
2224 #ifdef I40E_FCOE
2225 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2226 			  __always_unused __be16 proto, u16 vid)
2227 #else
2228 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2229 				 __always_unused __be16 proto, u16 vid)
2230 #endif
2231 {
2232 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2233 	struct i40e_vsi *vsi = np->vsi;
2234 
2235 	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2236 
2237 	/* return code is ignored as there is nothing a user
2238 	 * can do about failure to remove and a log message was
2239 	 * already printed from the other function
2240 	 */
2241 	i40e_vsi_kill_vlan(vsi, vid);
2242 
2243 	clear_bit(vid, vsi->active_vlans);
2244 
2245 	return 0;
2246 }
2247 
2248 /**
2249  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2250  * @vsi: the vsi being brought back up
2251  **/
2252 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2253 {
2254 	u16 vid;
2255 
2256 	if (!vsi->netdev)
2257 		return;
2258 
2259 	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2260 
2261 	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2262 		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2263 				     vid);
2264 }
2265 
2266 /**
2267  * i40e_vsi_add_pvid - Add pvid for the VSI
2268  * @vsi: the vsi being adjusted
2269  * @vid: the vlan id to set as a PVID
2270  **/
2271 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2272 {
2273 	struct i40e_vsi_context ctxt;
2274 	i40e_status aq_ret;
2275 
2276 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2277 	vsi->info.pvid = cpu_to_le16(vid);
2278 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2279 				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
2280 				    I40E_AQ_VSI_PVLAN_EMOD_STR;
2281 
2282 	ctxt.seid = vsi->seid;
2283 	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2284 	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2285 	if (aq_ret) {
2286 		dev_info(&vsi->back->pdev->dev,
2287 			 "%s: update vsi failed, aq_err=%d\n",
2288 			 __func__, vsi->back->hw.aq.asq_last_status);
2289 		return -ENOENT;
2290 	}
2291 
2292 	return 0;
2293 }
2294 
2295 /**
2296  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2297  * @vsi: the vsi being adjusted
2298  *
2299  * Just use the vlan_rx_register() service to put it back to normal
2300  **/
2301 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2302 {
2303 	i40e_vlan_stripping_disable(vsi);
2304 
2305 	vsi->info.pvid = 0;
2306 }
2307 
2308 /**
2309  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2310  * @vsi: ptr to the VSI
2311  *
2312  * If this function returns with an error, then it's possible one or
2313  * more of the rings is populated (while the rest are not).  It is the
2314  * callers duty to clean those orphaned rings.
2315  *
2316  * Return 0 on success, negative on failure
2317  **/
2318 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2319 {
2320 	int i, err = 0;
2321 
2322 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2323 		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2324 
2325 	return err;
2326 }
2327 
2328 /**
2329  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2330  * @vsi: ptr to the VSI
2331  *
2332  * Free VSI's transmit software resources
2333  **/
2334 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2335 {
2336 	int i;
2337 
2338 	if (!vsi->tx_rings)
2339 		return;
2340 
2341 	for (i = 0; i < vsi->num_queue_pairs; i++)
2342 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2343 			i40e_free_tx_resources(vsi->tx_rings[i]);
2344 }
2345 
2346 /**
2347  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2348  * @vsi: ptr to the VSI
2349  *
2350  * If this function returns with an error, then it's possible one or
2351  * more of the rings is populated (while the rest are not).  It is the
2352  * callers duty to clean those orphaned rings.
2353  *
2354  * Return 0 on success, negative on failure
2355  **/
2356 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2357 {
2358 	int i, err = 0;
2359 
2360 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2361 		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2362 #ifdef I40E_FCOE
2363 	i40e_fcoe_setup_ddp_resources(vsi);
2364 #endif
2365 	return err;
2366 }
2367 
2368 /**
2369  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2370  * @vsi: ptr to the VSI
2371  *
2372  * Free all receive software resources
2373  **/
2374 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2375 {
2376 	int i;
2377 
2378 	if (!vsi->rx_rings)
2379 		return;
2380 
2381 	for (i = 0; i < vsi->num_queue_pairs; i++)
2382 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2383 			i40e_free_rx_resources(vsi->rx_rings[i]);
2384 #ifdef I40E_FCOE
2385 	i40e_fcoe_free_ddp_resources(vsi);
2386 #endif
2387 }
2388 
2389 /**
2390  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2391  * @ring: The Tx ring to configure
2392  *
2393  * This enables/disables XPS for a given Tx descriptor ring
2394  * based on the TCs enabled for the VSI that ring belongs to.
2395  **/
2396 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2397 {
2398 	struct i40e_vsi *vsi = ring->vsi;
2399 	cpumask_var_t mask;
2400 
2401 	if (ring->q_vector && ring->netdev) {
2402 		/* Single TC mode enable XPS */
2403 		if (vsi->tc_config.numtc <= 1 &&
2404 		    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
2405 			netif_set_xps_queue(ring->netdev,
2406 					    &ring->q_vector->affinity_mask,
2407 					    ring->queue_index);
2408 		} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2409 			/* Disable XPS to allow selection based on TC */
2410 			bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2411 			netif_set_xps_queue(ring->netdev, mask,
2412 					    ring->queue_index);
2413 			free_cpumask_var(mask);
2414 		}
2415 	}
2416 }
2417 
2418 /**
2419  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2420  * @ring: The Tx ring to configure
2421  *
2422  * Configure the Tx descriptor ring in the HMC context.
2423  **/
2424 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2425 {
2426 	struct i40e_vsi *vsi = ring->vsi;
2427 	u16 pf_q = vsi->base_queue + ring->queue_index;
2428 	struct i40e_hw *hw = &vsi->back->hw;
2429 	struct i40e_hmc_obj_txq tx_ctx;
2430 	i40e_status err = 0;
2431 	u32 qtx_ctl = 0;
2432 
2433 	/* some ATR related tx ring init */
2434 	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2435 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
2436 		ring->atr_count = 0;
2437 	} else {
2438 		ring->atr_sample_rate = 0;
2439 	}
2440 
2441 	/* configure XPS */
2442 	i40e_config_xps_tx_ring(ring);
2443 
2444 	/* clear the context structure first */
2445 	memset(&tx_ctx, 0, sizeof(tx_ctx));
2446 
2447 	tx_ctx.new_context = 1;
2448 	tx_ctx.base = (ring->dma / 128);
2449 	tx_ctx.qlen = ring->count;
2450 	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2451 					       I40E_FLAG_FD_ATR_ENABLED));
2452 #ifdef I40E_FCOE
2453 	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2454 #endif
2455 	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2456 	/* FDIR VSI tx ring can still use RS bit and writebacks */
2457 	if (vsi->type != I40E_VSI_FDIR)
2458 		tx_ctx.head_wb_ena = 1;
2459 	tx_ctx.head_wb_addr = ring->dma +
2460 			      (ring->count * sizeof(struct i40e_tx_desc));
2461 
2462 	/* As part of VSI creation/update, FW allocates certain
2463 	 * Tx arbitration queue sets for each TC enabled for
2464 	 * the VSI. The FW returns the handles to these queue
2465 	 * sets as part of the response buffer to Add VSI,
2466 	 * Update VSI, etc. AQ commands. It is expected that
2467 	 * these queue set handles be associated with the Tx
2468 	 * queues by the driver as part of the TX queue context
2469 	 * initialization. This has to be done regardless of
2470 	 * DCB as by default everything is mapped to TC0.
2471 	 */
2472 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2473 	tx_ctx.rdylist_act = 0;
2474 
2475 	/* clear the context in the HMC */
2476 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2477 	if (err) {
2478 		dev_info(&vsi->back->pdev->dev,
2479 			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2480 			 ring->queue_index, pf_q, err);
2481 		return -ENOMEM;
2482 	}
2483 
2484 	/* set the context in the HMC */
2485 	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2486 	if (err) {
2487 		dev_info(&vsi->back->pdev->dev,
2488 			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2489 			 ring->queue_index, pf_q, err);
2490 		return -ENOMEM;
2491 	}
2492 
2493 	/* Now associate this queue with this PCI function */
2494 	if (vsi->type == I40E_VSI_VMDQ2) {
2495 		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2496 		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2497 			   I40E_QTX_CTL_VFVM_INDX_MASK;
2498 	} else {
2499 		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2500 	}
2501 
2502 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2503 		    I40E_QTX_CTL_PF_INDX_MASK);
2504 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2505 	i40e_flush(hw);
2506 
2507 	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2508 
2509 	/* cache tail off for easier writes later */
2510 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2511 
2512 	return 0;
2513 }
2514 
2515 /**
2516  * i40e_configure_rx_ring - Configure a receive ring context
2517  * @ring: The Rx ring to configure
2518  *
2519  * Configure the Rx descriptor ring in the HMC context.
2520  **/
2521 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2522 {
2523 	struct i40e_vsi *vsi = ring->vsi;
2524 	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2525 	u16 pf_q = vsi->base_queue + ring->queue_index;
2526 	struct i40e_hw *hw = &vsi->back->hw;
2527 	struct i40e_hmc_obj_rxq rx_ctx;
2528 	i40e_status err = 0;
2529 
2530 	ring->state = 0;
2531 
2532 	/* clear the context structure first */
2533 	memset(&rx_ctx, 0, sizeof(rx_ctx));
2534 
2535 	ring->rx_buf_len = vsi->rx_buf_len;
2536 	ring->rx_hdr_len = vsi->rx_hdr_len;
2537 
2538 	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2539 	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2540 
2541 	rx_ctx.base = (ring->dma / 128);
2542 	rx_ctx.qlen = ring->count;
2543 
2544 	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2545 		set_ring_16byte_desc_enabled(ring);
2546 		rx_ctx.dsize = 0;
2547 	} else {
2548 		rx_ctx.dsize = 1;
2549 	}
2550 
2551 	rx_ctx.dtype = vsi->dtype;
2552 	if (vsi->dtype) {
2553 		set_ring_ps_enabled(ring);
2554 		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
2555 				  I40E_RX_SPLIT_IP      |
2556 				  I40E_RX_SPLIT_TCP_UDP |
2557 				  I40E_RX_SPLIT_SCTP;
2558 	} else {
2559 		rx_ctx.hsplit_0 = 0;
2560 	}
2561 
2562 	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2563 				  (chain_len * ring->rx_buf_len));
2564 	if (hw->revision_id == 0)
2565 		rx_ctx.lrxqthresh = 0;
2566 	else
2567 		rx_ctx.lrxqthresh = 2;
2568 	rx_ctx.crcstrip = 1;
2569 	rx_ctx.l2tsel = 1;
2570 	rx_ctx.showiv = 1;
2571 #ifdef I40E_FCOE
2572 	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2573 #endif
2574 	/* set the prefena field to 1 because the manual says to */
2575 	rx_ctx.prefena = 1;
2576 
2577 	/* clear the context in the HMC */
2578 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2579 	if (err) {
2580 		dev_info(&vsi->back->pdev->dev,
2581 			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2582 			 ring->queue_index, pf_q, err);
2583 		return -ENOMEM;
2584 	}
2585 
2586 	/* set the context in the HMC */
2587 	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2588 	if (err) {
2589 		dev_info(&vsi->back->pdev->dev,
2590 			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2591 			 ring->queue_index, pf_q, err);
2592 		return -ENOMEM;
2593 	}
2594 
2595 	/* cache tail for quicker writes, and clear the reg before use */
2596 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2597 	writel(0, ring->tail);
2598 
2599 	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2600 
2601 	return 0;
2602 }
2603 
2604 /**
2605  * i40e_vsi_configure_tx - Configure the VSI for Tx
2606  * @vsi: VSI structure describing this set of rings and resources
2607  *
2608  * Configure the Tx VSI for operation.
2609  **/
2610 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2611 {
2612 	int err = 0;
2613 	u16 i;
2614 
2615 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2616 		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2617 
2618 	return err;
2619 }
2620 
2621 /**
2622  * i40e_vsi_configure_rx - Configure the VSI for Rx
2623  * @vsi: the VSI being configured
2624  *
2625  * Configure the Rx VSI for operation.
2626  **/
2627 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2628 {
2629 	int err = 0;
2630 	u16 i;
2631 
2632 	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2633 		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2634 			       + ETH_FCS_LEN + VLAN_HLEN;
2635 	else
2636 		vsi->max_frame = I40E_RXBUFFER_2048;
2637 
2638 	/* figure out correct receive buffer length */
2639 	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2640 				    I40E_FLAG_RX_PS_ENABLED)) {
2641 	case I40E_FLAG_RX_1BUF_ENABLED:
2642 		vsi->rx_hdr_len = 0;
2643 		vsi->rx_buf_len = vsi->max_frame;
2644 		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2645 		break;
2646 	case I40E_FLAG_RX_PS_ENABLED:
2647 		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2648 		vsi->rx_buf_len = I40E_RXBUFFER_2048;
2649 		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2650 		break;
2651 	default:
2652 		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2653 		vsi->rx_buf_len = I40E_RXBUFFER_2048;
2654 		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2655 		break;
2656 	}
2657 
2658 #ifdef I40E_FCOE
2659 	/* setup rx buffer for FCoE */
2660 	if ((vsi->type == I40E_VSI_FCOE) &&
2661 	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2662 		vsi->rx_hdr_len = 0;
2663 		vsi->rx_buf_len = I40E_RXBUFFER_3072;
2664 		vsi->max_frame = I40E_RXBUFFER_3072;
2665 		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2666 	}
2667 
2668 #endif /* I40E_FCOE */
2669 	/* round up for the chip's needs */
2670 	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2671 				(1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2672 	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2673 				(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2674 
2675 	/* set up individual rings */
2676 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2677 		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2678 
2679 	return err;
2680 }
2681 
2682 /**
2683  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2684  * @vsi: ptr to the VSI
2685  **/
2686 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2687 {
2688 	struct i40e_ring *tx_ring, *rx_ring;
2689 	u16 qoffset, qcount;
2690 	int i, n;
2691 
2692 	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2693 		/* Reset the TC information */
2694 		for (i = 0; i < vsi->num_queue_pairs; i++) {
2695 			rx_ring = vsi->rx_rings[i];
2696 			tx_ring = vsi->tx_rings[i];
2697 			rx_ring->dcb_tc = 0;
2698 			tx_ring->dcb_tc = 0;
2699 		}
2700 	}
2701 
2702 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2703 		if (!(vsi->tc_config.enabled_tc & (1 << n)))
2704 			continue;
2705 
2706 		qoffset = vsi->tc_config.tc_info[n].qoffset;
2707 		qcount = vsi->tc_config.tc_info[n].qcount;
2708 		for (i = qoffset; i < (qoffset + qcount); i++) {
2709 			rx_ring = vsi->rx_rings[i];
2710 			tx_ring = vsi->tx_rings[i];
2711 			rx_ring->dcb_tc = n;
2712 			tx_ring->dcb_tc = n;
2713 		}
2714 	}
2715 }
2716 
2717 /**
2718  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2719  * @vsi: ptr to the VSI
2720  **/
2721 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2722 {
2723 	if (vsi->netdev)
2724 		i40e_set_rx_mode(vsi->netdev);
2725 }
2726 
2727 /**
2728  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2729  * @vsi: Pointer to the targeted VSI
2730  *
2731  * This function replays the hlist on the hw where all the SB Flow Director
2732  * filters were saved.
2733  **/
2734 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2735 {
2736 	struct i40e_fdir_filter *filter;
2737 	struct i40e_pf *pf = vsi->back;
2738 	struct hlist_node *node;
2739 
2740 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2741 		return;
2742 
2743 	hlist_for_each_entry_safe(filter, node,
2744 				  &pf->fdir_filter_list, fdir_node) {
2745 		i40e_add_del_fdir(vsi, filter, true);
2746 	}
2747 }
2748 
2749 /**
2750  * i40e_vsi_configure - Set up the VSI for action
2751  * @vsi: the VSI being configured
2752  **/
2753 static int i40e_vsi_configure(struct i40e_vsi *vsi)
2754 {
2755 	int err;
2756 
2757 	i40e_set_vsi_rx_mode(vsi);
2758 	i40e_restore_vlan(vsi);
2759 	i40e_vsi_config_dcb_rings(vsi);
2760 	err = i40e_vsi_configure_tx(vsi);
2761 	if (!err)
2762 		err = i40e_vsi_configure_rx(vsi);
2763 
2764 	return err;
2765 }
2766 
2767 /**
2768  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2769  * @vsi: the VSI being configured
2770  **/
2771 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2772 {
2773 	struct i40e_pf *pf = vsi->back;
2774 	struct i40e_q_vector *q_vector;
2775 	struct i40e_hw *hw = &pf->hw;
2776 	u16 vector;
2777 	int i, q;
2778 	u32 val;
2779 	u32 qp;
2780 
2781 	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
2782 	 * and PFINT_LNKLSTn registers, e.g.:
2783 	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
2784 	 */
2785 	qp = vsi->base_queue;
2786 	vector = vsi->base_vector;
2787 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2788 		q_vector = vsi->q_vectors[i];
2789 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2790 		q_vector->rx.latency_range = I40E_LOW_LATENCY;
2791 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2792 		     q_vector->rx.itr);
2793 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2794 		q_vector->tx.latency_range = I40E_LOW_LATENCY;
2795 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2796 		     q_vector->tx.itr);
2797 
2798 		/* Linked list for the queuepairs assigned to this vector */
2799 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2800 		for (q = 0; q < q_vector->num_ringpairs; q++) {
2801 			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2802 			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
2803 			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2804 			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2805 			      (I40E_QUEUE_TYPE_TX
2806 				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2807 
2808 			wr32(hw, I40E_QINT_RQCTL(qp), val);
2809 
2810 			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2811 			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
2812 			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2813 			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2814 			      (I40E_QUEUE_TYPE_RX
2815 				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2816 
2817 			/* Terminate the linked list */
2818 			if (q == (q_vector->num_ringpairs - 1))
2819 				val |= (I40E_QUEUE_END_OF_LIST
2820 					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2821 
2822 			wr32(hw, I40E_QINT_TQCTL(qp), val);
2823 			qp++;
2824 		}
2825 	}
2826 
2827 	i40e_flush(hw);
2828 }
2829 
2830 /**
2831  * i40e_enable_misc_int_causes - enable the non-queue interrupts
2832  * @hw: ptr to the hardware info
2833  **/
2834 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2835 {
2836 	struct i40e_hw *hw = &pf->hw;
2837 	u32 val;
2838 
2839 	/* clear things first */
2840 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2841 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2842 
2843 	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
2844 	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
2845 	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
2846 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2847 	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
2848 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
2849 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
2850 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2851 
2852 	if (pf->flags & I40E_FLAG_PTP)
2853 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2854 
2855 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
2856 
2857 	/* SW_ITR_IDX = 0, but don't change INTENA */
2858 	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2859 					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2860 
2861 	/* OTHER_ITR_IDX = 0 */
2862 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2863 }
2864 
2865 /**
2866  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2867  * @vsi: the VSI being configured
2868  **/
2869 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2870 {
2871 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2872 	struct i40e_pf *pf = vsi->back;
2873 	struct i40e_hw *hw = &pf->hw;
2874 	u32 val;
2875 
2876 	/* set the ITR configuration */
2877 	q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2878 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
2879 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2880 	q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2881 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
2882 	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2883 
2884 	i40e_enable_misc_int_causes(pf);
2885 
2886 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2887 	wr32(hw, I40E_PFINT_LNKLST0, 0);
2888 
2889 	/* Associate the queue pair to the vector and enable the queue int */
2890 	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		      |
2891 	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2892 	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2893 
2894 	wr32(hw, I40E_QINT_RQCTL(0), val);
2895 
2896 	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
2897 	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2898 	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2899 
2900 	wr32(hw, I40E_QINT_TQCTL(0), val);
2901 	i40e_flush(hw);
2902 }
2903 
2904 /**
2905  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2906  * @pf: board private structure
2907  **/
2908 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2909 {
2910 	struct i40e_hw *hw = &pf->hw;
2911 
2912 	wr32(hw, I40E_PFINT_DYN_CTL0,
2913 	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2914 	i40e_flush(hw);
2915 }
2916 
2917 /**
2918  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2919  * @pf: board private structure
2920  **/
2921 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2922 {
2923 	struct i40e_hw *hw = &pf->hw;
2924 	u32 val;
2925 
2926 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
2927 	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2928 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2929 
2930 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
2931 	i40e_flush(hw);
2932 }
2933 
2934 /**
2935  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2936  * @vsi: pointer to a vsi
2937  * @vector: enable a particular Hw Interrupt vector
2938  **/
2939 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2940 {
2941 	struct i40e_pf *pf = vsi->back;
2942 	struct i40e_hw *hw = &pf->hw;
2943 	u32 val;
2944 
2945 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2946 	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2947 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2948 	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2949 	/* skip the flush */
2950 }
2951 
2952 /**
2953  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2954  * @vsi: pointer to a vsi
2955  * @vector: disable a particular Hw Interrupt vector
2956  **/
2957 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2958 {
2959 	struct i40e_pf *pf = vsi->back;
2960 	struct i40e_hw *hw = &pf->hw;
2961 	u32 val;
2962 
2963 	val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2964 	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2965 	i40e_flush(hw);
2966 }
2967 
2968 /**
2969  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2970  * @irq: interrupt number
2971  * @data: pointer to a q_vector
2972  **/
2973 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2974 {
2975 	struct i40e_q_vector *q_vector = data;
2976 
2977 	if (!q_vector->tx.ring && !q_vector->rx.ring)
2978 		return IRQ_HANDLED;
2979 
2980 	napi_schedule(&q_vector->napi);
2981 
2982 	return IRQ_HANDLED;
2983 }
2984 
2985 /**
2986  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2987  * @vsi: the VSI being configured
2988  * @basename: name for the vector
2989  *
2990  * Allocates MSI-X vectors and requests interrupts from the kernel.
2991  **/
2992 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2993 {
2994 	int q_vectors = vsi->num_q_vectors;
2995 	struct i40e_pf *pf = vsi->back;
2996 	int base = vsi->base_vector;
2997 	int rx_int_idx = 0;
2998 	int tx_int_idx = 0;
2999 	int vector, err;
3000 
3001 	for (vector = 0; vector < q_vectors; vector++) {
3002 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3003 
3004 		if (q_vector->tx.ring && q_vector->rx.ring) {
3005 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3006 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3007 			tx_int_idx++;
3008 		} else if (q_vector->rx.ring) {
3009 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3010 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
3011 		} else if (q_vector->tx.ring) {
3012 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3013 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
3014 		} else {
3015 			/* skip this unused q_vector */
3016 			continue;
3017 		}
3018 		err = request_irq(pf->msix_entries[base + vector].vector,
3019 				  vsi->irq_handler,
3020 				  0,
3021 				  q_vector->name,
3022 				  q_vector);
3023 		if (err) {
3024 			dev_info(&pf->pdev->dev,
3025 				 "%s: request_irq failed, error: %d\n",
3026 				 __func__, err);
3027 			goto free_queue_irqs;
3028 		}
3029 		/* assign the mask for this irq */
3030 		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3031 				      &q_vector->affinity_mask);
3032 	}
3033 
3034 	vsi->irqs_ready = true;
3035 	return 0;
3036 
3037 free_queue_irqs:
3038 	while (vector) {
3039 		vector--;
3040 		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3041 				      NULL);
3042 		free_irq(pf->msix_entries[base + vector].vector,
3043 			 &(vsi->q_vectors[vector]));
3044 	}
3045 	return err;
3046 }
3047 
3048 /**
3049  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3050  * @vsi: the VSI being un-configured
3051  **/
3052 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3053 {
3054 	struct i40e_pf *pf = vsi->back;
3055 	struct i40e_hw *hw = &pf->hw;
3056 	int base = vsi->base_vector;
3057 	int i;
3058 
3059 	for (i = 0; i < vsi->num_queue_pairs; i++) {
3060 		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3061 		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3062 	}
3063 
3064 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3065 		for (i = vsi->base_vector;
3066 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3067 			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3068 
3069 		i40e_flush(hw);
3070 		for (i = 0; i < vsi->num_q_vectors; i++)
3071 			synchronize_irq(pf->msix_entries[i + base].vector);
3072 	} else {
3073 		/* Legacy and MSI mode - this stops all interrupt handling */
3074 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3075 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3076 		i40e_flush(hw);
3077 		synchronize_irq(pf->pdev->irq);
3078 	}
3079 }
3080 
3081 /**
3082  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3083  * @vsi: the VSI being configured
3084  **/
3085 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3086 {
3087 	struct i40e_pf *pf = vsi->back;
3088 	int i;
3089 
3090 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3091 		for (i = vsi->base_vector;
3092 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3093 			i40e_irq_dynamic_enable(vsi, i);
3094 	} else {
3095 		i40e_irq_dynamic_enable_icr0(pf);
3096 	}
3097 
3098 	i40e_flush(&pf->hw);
3099 	return 0;
3100 }
3101 
3102 /**
3103  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3104  * @pf: board private structure
3105  **/
3106 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3107 {
3108 	/* Disable ICR 0 */
3109 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3110 	i40e_flush(&pf->hw);
3111 }
3112 
3113 /**
3114  * i40e_intr - MSI/Legacy and non-queue interrupt handler
3115  * @irq: interrupt number
3116  * @data: pointer to a q_vector
3117  *
3118  * This is the handler used for all MSI/Legacy interrupts, and deals
3119  * with both queue and non-queue interrupts.  This is also used in
3120  * MSIX mode to handle the non-queue interrupts.
3121  **/
3122 static irqreturn_t i40e_intr(int irq, void *data)
3123 {
3124 	struct i40e_pf *pf = (struct i40e_pf *)data;
3125 	struct i40e_hw *hw = &pf->hw;
3126 	irqreturn_t ret = IRQ_NONE;
3127 	u32 icr0, icr0_remaining;
3128 	u32 val, ena_mask;
3129 
3130 	icr0 = rd32(hw, I40E_PFINT_ICR0);
3131 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3132 
3133 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
3134 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3135 		goto enable_intr;
3136 
3137 	/* if interrupt but no bits showing, must be SWINT */
3138 	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3139 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3140 		pf->sw_int_count++;
3141 
3142 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3143 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3144 
3145 		/* temporarily disable queue cause for NAPI processing */
3146 		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3147 		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3148 		wr32(hw, I40E_QINT_RQCTL(0), qval);
3149 
3150 		qval = rd32(hw, I40E_QINT_TQCTL(0));
3151 		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3152 		wr32(hw, I40E_QINT_TQCTL(0), qval);
3153 
3154 		if (!test_bit(__I40E_DOWN, &pf->state))
3155 			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3156 	}
3157 
3158 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3159 		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3160 		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3161 	}
3162 
3163 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3164 		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3165 		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3166 	}
3167 
3168 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3169 		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3170 		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3171 	}
3172 
3173 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3174 		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3175 			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3176 		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3177 		val = rd32(hw, I40E_GLGEN_RSTAT);
3178 		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3179 		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3180 		if (val == I40E_RESET_CORER) {
3181 			pf->corer_count++;
3182 		} else if (val == I40E_RESET_GLOBR) {
3183 			pf->globr_count++;
3184 		} else if (val == I40E_RESET_EMPR) {
3185 			pf->empr_count++;
3186 			set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
3187 		}
3188 	}
3189 
3190 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3191 		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3192 		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3193 	}
3194 
3195 	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3196 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3197 
3198 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3199 			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3200 			i40e_ptp_tx_hwtstamp(pf);
3201 		}
3202 	}
3203 
3204 	/* If a critical error is pending we have no choice but to reset the
3205 	 * device.
3206 	 * Report and mask out any remaining unexpected interrupts.
3207 	 */
3208 	icr0_remaining = icr0 & ena_mask;
3209 	if (icr0_remaining) {
3210 		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3211 			 icr0_remaining);
3212 		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3213 		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3214 		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3215 			dev_info(&pf->pdev->dev, "device will be reset\n");
3216 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3217 			i40e_service_event_schedule(pf);
3218 		}
3219 		ena_mask &= ~icr0_remaining;
3220 	}
3221 	ret = IRQ_HANDLED;
3222 
3223 enable_intr:
3224 	/* re-enable interrupt causes */
3225 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3226 	if (!test_bit(__I40E_DOWN, &pf->state)) {
3227 		i40e_service_event_schedule(pf);
3228 		i40e_irq_dynamic_enable_icr0(pf);
3229 	}
3230 
3231 	return ret;
3232 }
3233 
3234 /**
3235  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3236  * @tx_ring:  tx ring to clean
3237  * @budget:   how many cleans we're allowed
3238  *
3239  * Returns true if there's any budget left (e.g. the clean is finished)
3240  **/
3241 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3242 {
3243 	struct i40e_vsi *vsi = tx_ring->vsi;
3244 	u16 i = tx_ring->next_to_clean;
3245 	struct i40e_tx_buffer *tx_buf;
3246 	struct i40e_tx_desc *tx_desc;
3247 
3248 	tx_buf = &tx_ring->tx_bi[i];
3249 	tx_desc = I40E_TX_DESC(tx_ring, i);
3250 	i -= tx_ring->count;
3251 
3252 	do {
3253 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3254 
3255 		/* if next_to_watch is not set then there is no work pending */
3256 		if (!eop_desc)
3257 			break;
3258 
3259 		/* prevent any other reads prior to eop_desc */
3260 		read_barrier_depends();
3261 
3262 		/* if the descriptor isn't done, no work yet to do */
3263 		if (!(eop_desc->cmd_type_offset_bsz &
3264 		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3265 			break;
3266 
3267 		/* clear next_to_watch to prevent false hangs */
3268 		tx_buf->next_to_watch = NULL;
3269 
3270 		tx_desc->buffer_addr = 0;
3271 		tx_desc->cmd_type_offset_bsz = 0;
3272 		/* move past filter desc */
3273 		tx_buf++;
3274 		tx_desc++;
3275 		i++;
3276 		if (unlikely(!i)) {
3277 			i -= tx_ring->count;
3278 			tx_buf = tx_ring->tx_bi;
3279 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3280 		}
3281 		/* unmap skb header data */
3282 		dma_unmap_single(tx_ring->dev,
3283 				 dma_unmap_addr(tx_buf, dma),
3284 				 dma_unmap_len(tx_buf, len),
3285 				 DMA_TO_DEVICE);
3286 		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3287 			kfree(tx_buf->raw_buf);
3288 
3289 		tx_buf->raw_buf = NULL;
3290 		tx_buf->tx_flags = 0;
3291 		tx_buf->next_to_watch = NULL;
3292 		dma_unmap_len_set(tx_buf, len, 0);
3293 		tx_desc->buffer_addr = 0;
3294 		tx_desc->cmd_type_offset_bsz = 0;
3295 
3296 		/* move us past the eop_desc for start of next FD desc */
3297 		tx_buf++;
3298 		tx_desc++;
3299 		i++;
3300 		if (unlikely(!i)) {
3301 			i -= tx_ring->count;
3302 			tx_buf = tx_ring->tx_bi;
3303 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3304 		}
3305 
3306 		/* update budget accounting */
3307 		budget--;
3308 	} while (likely(budget));
3309 
3310 	i += tx_ring->count;
3311 	tx_ring->next_to_clean = i;
3312 
3313 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3314 		i40e_irq_dynamic_enable(vsi,
3315 				tx_ring->q_vector->v_idx + vsi->base_vector);
3316 	}
3317 	return budget > 0;
3318 }
3319 
3320 /**
3321  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3322  * @irq: interrupt number
3323  * @data: pointer to a q_vector
3324  **/
3325 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3326 {
3327 	struct i40e_q_vector *q_vector = data;
3328 	struct i40e_vsi *vsi;
3329 
3330 	if (!q_vector->tx.ring)
3331 		return IRQ_HANDLED;
3332 
3333 	vsi = q_vector->tx.ring->vsi;
3334 	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3335 
3336 	return IRQ_HANDLED;
3337 }
3338 
3339 /**
3340  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3341  * @vsi: the VSI being configured
3342  * @v_idx: vector index
3343  * @qp_idx: queue pair index
3344  **/
3345 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3346 {
3347 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3348 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3349 	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3350 
3351 	tx_ring->q_vector = q_vector;
3352 	tx_ring->next = q_vector->tx.ring;
3353 	q_vector->tx.ring = tx_ring;
3354 	q_vector->tx.count++;
3355 
3356 	rx_ring->q_vector = q_vector;
3357 	rx_ring->next = q_vector->rx.ring;
3358 	q_vector->rx.ring = rx_ring;
3359 	q_vector->rx.count++;
3360 }
3361 
3362 /**
3363  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3364  * @vsi: the VSI being configured
3365  *
3366  * This function maps descriptor rings to the queue-specific vectors
3367  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3368  * one vector per queue pair, but on a constrained vector budget, we
3369  * group the queue pairs as "efficiently" as possible.
3370  **/
3371 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3372 {
3373 	int qp_remaining = vsi->num_queue_pairs;
3374 	int q_vectors = vsi->num_q_vectors;
3375 	int num_ringpairs;
3376 	int v_start = 0;
3377 	int qp_idx = 0;
3378 
3379 	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3380 	 * group them so there are multiple queues per vector.
3381 	 * It is also important to go through all the vectors available to be
3382 	 * sure that if we don't use all the vectors, that the remaining vectors
3383 	 * are cleared. This is especially important when decreasing the
3384 	 * number of queues in use.
3385 	 */
3386 	for (; v_start < q_vectors; v_start++) {
3387 		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3388 
3389 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3390 
3391 		q_vector->num_ringpairs = num_ringpairs;
3392 
3393 		q_vector->rx.count = 0;
3394 		q_vector->tx.count = 0;
3395 		q_vector->rx.ring = NULL;
3396 		q_vector->tx.ring = NULL;
3397 
3398 		while (num_ringpairs--) {
3399 			map_vector_to_qp(vsi, v_start, qp_idx);
3400 			qp_idx++;
3401 			qp_remaining--;
3402 		}
3403 	}
3404 }
3405 
3406 /**
3407  * i40e_vsi_request_irq - Request IRQ from the OS
3408  * @vsi: the VSI being configured
3409  * @basename: name for the vector
3410  **/
3411 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3412 {
3413 	struct i40e_pf *pf = vsi->back;
3414 	int err;
3415 
3416 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3417 		err = i40e_vsi_request_irq_msix(vsi, basename);
3418 	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3419 		err = request_irq(pf->pdev->irq, i40e_intr, 0,
3420 				  pf->int_name, pf);
3421 	else
3422 		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3423 				  pf->int_name, pf);
3424 
3425 	if (err)
3426 		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3427 
3428 	return err;
3429 }
3430 
3431 #ifdef CONFIG_NET_POLL_CONTROLLER
3432 /**
3433  * i40e_netpoll - A Polling 'interrupt'handler
3434  * @netdev: network interface device structure
3435  *
3436  * This is used by netconsole to send skbs without having to re-enable
3437  * interrupts.  It's not called while the normal interrupt routine is executing.
3438  **/
3439 #ifdef I40E_FCOE
3440 void i40e_netpoll(struct net_device *netdev)
3441 #else
3442 static void i40e_netpoll(struct net_device *netdev)
3443 #endif
3444 {
3445 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3446 	struct i40e_vsi *vsi = np->vsi;
3447 	struct i40e_pf *pf = vsi->back;
3448 	int i;
3449 
3450 	/* if interface is down do nothing */
3451 	if (test_bit(__I40E_DOWN, &vsi->state))
3452 		return;
3453 
3454 	pf->flags |= I40E_FLAG_IN_NETPOLL;
3455 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3456 		for (i = 0; i < vsi->num_q_vectors; i++)
3457 			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3458 	} else {
3459 		i40e_intr(pf->pdev->irq, netdev);
3460 	}
3461 	pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3462 }
3463 #endif
3464 
3465 /**
3466  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3467  * @pf: the PF being configured
3468  * @pf_q: the PF queue
3469  * @enable: enable or disable state of the queue
3470  *
3471  * This routine will wait for the given Tx queue of the PF to reach the
3472  * enabled or disabled state.
3473  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3474  * multiple retries; else will return 0 in case of success.
3475  **/
3476 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3477 {
3478 	int i;
3479 	u32 tx_reg;
3480 
3481 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3482 		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3483 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3484 			break;
3485 
3486 		usleep_range(10, 20);
3487 	}
3488 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3489 		return -ETIMEDOUT;
3490 
3491 	return 0;
3492 }
3493 
3494 /**
3495  * i40e_vsi_control_tx - Start or stop a VSI's rings
3496  * @vsi: the VSI being configured
3497  * @enable: start or stop the rings
3498  **/
3499 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3500 {
3501 	struct i40e_pf *pf = vsi->back;
3502 	struct i40e_hw *hw = &pf->hw;
3503 	int i, j, pf_q, ret = 0;
3504 	u32 tx_reg;
3505 
3506 	pf_q = vsi->base_queue;
3507 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3508 
3509 		/* warn the TX unit of coming changes */
3510 		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3511 		if (!enable)
3512 			usleep_range(10, 20);
3513 
3514 		for (j = 0; j < 50; j++) {
3515 			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3516 			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3517 			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3518 				break;
3519 			usleep_range(1000, 2000);
3520 		}
3521 		/* Skip if the queue is already in the requested state */
3522 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3523 			continue;
3524 
3525 		/* turn on/off the queue */
3526 		if (enable) {
3527 			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3528 			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3529 		} else {
3530 			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3531 		}
3532 
3533 		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3534 		/* No waiting for the Tx queue to disable */
3535 		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3536 			continue;
3537 
3538 		/* wait for the change to finish */
3539 		ret = i40e_pf_txq_wait(pf, pf_q, enable);
3540 		if (ret) {
3541 			dev_info(&pf->pdev->dev,
3542 				 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3543 				 __func__, vsi->seid, pf_q,
3544 				 (enable ? "en" : "dis"));
3545 			break;
3546 		}
3547 	}
3548 
3549 	if (hw->revision_id == 0)
3550 		mdelay(50);
3551 	return ret;
3552 }
3553 
3554 /**
3555  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3556  * @pf: the PF being configured
3557  * @pf_q: the PF queue
3558  * @enable: enable or disable state of the queue
3559  *
3560  * This routine will wait for the given Rx queue of the PF to reach the
3561  * enabled or disabled state.
3562  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3563  * multiple retries; else will return 0 in case of success.
3564  **/
3565 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3566 {
3567 	int i;
3568 	u32 rx_reg;
3569 
3570 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3571 		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3572 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3573 			break;
3574 
3575 		usleep_range(10, 20);
3576 	}
3577 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3578 		return -ETIMEDOUT;
3579 
3580 	return 0;
3581 }
3582 
3583 /**
3584  * i40e_vsi_control_rx - Start or stop a VSI's rings
3585  * @vsi: the VSI being configured
3586  * @enable: start or stop the rings
3587  **/
3588 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3589 {
3590 	struct i40e_pf *pf = vsi->back;
3591 	struct i40e_hw *hw = &pf->hw;
3592 	int i, j, pf_q, ret = 0;
3593 	u32 rx_reg;
3594 
3595 	pf_q = vsi->base_queue;
3596 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3597 		for (j = 0; j < 50; j++) {
3598 			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3599 			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3600 			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3601 				break;
3602 			usleep_range(1000, 2000);
3603 		}
3604 
3605 		/* Skip if the queue is already in the requested state */
3606 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3607 			continue;
3608 
3609 		/* turn on/off the queue */
3610 		if (enable)
3611 			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3612 		else
3613 			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3614 		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3615 
3616 		/* wait for the change to finish */
3617 		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3618 		if (ret) {
3619 			dev_info(&pf->pdev->dev,
3620 				 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3621 				 __func__, vsi->seid, pf_q,
3622 				 (enable ? "en" : "dis"));
3623 			break;
3624 		}
3625 	}
3626 
3627 	return ret;
3628 }
3629 
3630 /**
3631  * i40e_vsi_control_rings - Start or stop a VSI's rings
3632  * @vsi: the VSI being configured
3633  * @enable: start or stop the rings
3634  **/
3635 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3636 {
3637 	int ret = 0;
3638 
3639 	/* do rx first for enable and last for disable */
3640 	if (request) {
3641 		ret = i40e_vsi_control_rx(vsi, request);
3642 		if (ret)
3643 			return ret;
3644 		ret = i40e_vsi_control_tx(vsi, request);
3645 	} else {
3646 		/* Ignore return value, we need to shutdown whatever we can */
3647 		i40e_vsi_control_tx(vsi, request);
3648 		i40e_vsi_control_rx(vsi, request);
3649 	}
3650 
3651 	return ret;
3652 }
3653 
3654 /**
3655  * i40e_vsi_free_irq - Free the irq association with the OS
3656  * @vsi: the VSI being configured
3657  **/
3658 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3659 {
3660 	struct i40e_pf *pf = vsi->back;
3661 	struct i40e_hw *hw = &pf->hw;
3662 	int base = vsi->base_vector;
3663 	u32 val, qp;
3664 	int i;
3665 
3666 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3667 		if (!vsi->q_vectors)
3668 			return;
3669 
3670 		if (!vsi->irqs_ready)
3671 			return;
3672 
3673 		vsi->irqs_ready = false;
3674 		for (i = 0; i < vsi->num_q_vectors; i++) {
3675 			u16 vector = i + base;
3676 
3677 			/* free only the irqs that were actually requested */
3678 			if (!vsi->q_vectors[i] ||
3679 			    !vsi->q_vectors[i]->num_ringpairs)
3680 				continue;
3681 
3682 			/* clear the affinity_mask in the IRQ descriptor */
3683 			irq_set_affinity_hint(pf->msix_entries[vector].vector,
3684 					      NULL);
3685 			free_irq(pf->msix_entries[vector].vector,
3686 				 vsi->q_vectors[i]);
3687 
3688 			/* Tear down the interrupt queue link list
3689 			 *
3690 			 * We know that they come in pairs and always
3691 			 * the Rx first, then the Tx.  To clear the
3692 			 * link list, stick the EOL value into the
3693 			 * next_q field of the registers.
3694 			 */
3695 			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3696 			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3697 				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3698 			val |= I40E_QUEUE_END_OF_LIST
3699 				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3700 			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3701 
3702 			while (qp != I40E_QUEUE_END_OF_LIST) {
3703 				u32 next;
3704 
3705 				val = rd32(hw, I40E_QINT_RQCTL(qp));
3706 
3707 				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3708 					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3709 					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3710 					 I40E_QINT_RQCTL_INTEVENT_MASK);
3711 
3712 				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3713 					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3714 
3715 				wr32(hw, I40E_QINT_RQCTL(qp), val);
3716 
3717 				val = rd32(hw, I40E_QINT_TQCTL(qp));
3718 
3719 				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3720 					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3721 
3722 				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3723 					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3724 					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3725 					 I40E_QINT_TQCTL_INTEVENT_MASK);
3726 
3727 				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3728 					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3729 
3730 				wr32(hw, I40E_QINT_TQCTL(qp), val);
3731 				qp = next;
3732 			}
3733 		}
3734 	} else {
3735 		free_irq(pf->pdev->irq, pf);
3736 
3737 		val = rd32(hw, I40E_PFINT_LNKLST0);
3738 		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3739 			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3740 		val |= I40E_QUEUE_END_OF_LIST
3741 			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3742 		wr32(hw, I40E_PFINT_LNKLST0, val);
3743 
3744 		val = rd32(hw, I40E_QINT_RQCTL(qp));
3745 		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3746 			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3747 			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3748 			 I40E_QINT_RQCTL_INTEVENT_MASK);
3749 
3750 		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3751 			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3752 
3753 		wr32(hw, I40E_QINT_RQCTL(qp), val);
3754 
3755 		val = rd32(hw, I40E_QINT_TQCTL(qp));
3756 
3757 		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3758 			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3759 			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3760 			 I40E_QINT_TQCTL_INTEVENT_MASK);
3761 
3762 		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3763 			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3764 
3765 		wr32(hw, I40E_QINT_TQCTL(qp), val);
3766 	}
3767 }
3768 
3769 /**
3770  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3771  * @vsi: the VSI being configured
3772  * @v_idx: Index of vector to be freed
3773  *
3774  * This function frees the memory allocated to the q_vector.  In addition if
3775  * NAPI is enabled it will delete any references to the NAPI struct prior
3776  * to freeing the q_vector.
3777  **/
3778 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3779 {
3780 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3781 	struct i40e_ring *ring;
3782 
3783 	if (!q_vector)
3784 		return;
3785 
3786 	/* disassociate q_vector from rings */
3787 	i40e_for_each_ring(ring, q_vector->tx)
3788 		ring->q_vector = NULL;
3789 
3790 	i40e_for_each_ring(ring, q_vector->rx)
3791 		ring->q_vector = NULL;
3792 
3793 	/* only VSI w/ an associated netdev is set up w/ NAPI */
3794 	if (vsi->netdev)
3795 		netif_napi_del(&q_vector->napi);
3796 
3797 	vsi->q_vectors[v_idx] = NULL;
3798 
3799 	kfree_rcu(q_vector, rcu);
3800 }
3801 
3802 /**
3803  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3804  * @vsi: the VSI being un-configured
3805  *
3806  * This frees the memory allocated to the q_vectors and
3807  * deletes references to the NAPI struct.
3808  **/
3809 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3810 {
3811 	int v_idx;
3812 
3813 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3814 		i40e_free_q_vector(vsi, v_idx);
3815 }
3816 
3817 /**
3818  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3819  * @pf: board private structure
3820  **/
3821 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3822 {
3823 	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3824 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3825 		pci_disable_msix(pf->pdev);
3826 		kfree(pf->msix_entries);
3827 		pf->msix_entries = NULL;
3828 	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3829 		pci_disable_msi(pf->pdev);
3830 	}
3831 	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3832 }
3833 
3834 /**
3835  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3836  * @pf: board private structure
3837  *
3838  * We go through and clear interrupt specific resources and reset the structure
3839  * to pre-load conditions
3840  **/
3841 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3842 {
3843 	int i;
3844 
3845 	i40e_stop_misc_vector(pf);
3846 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3847 		synchronize_irq(pf->msix_entries[0].vector);
3848 		free_irq(pf->msix_entries[0].vector, pf);
3849 	}
3850 
3851 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3852 	for (i = 0; i < pf->num_alloc_vsi; i++)
3853 		if (pf->vsi[i])
3854 			i40e_vsi_free_q_vectors(pf->vsi[i]);
3855 	i40e_reset_interrupt_capability(pf);
3856 }
3857 
3858 /**
3859  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3860  * @vsi: the VSI being configured
3861  **/
3862 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3863 {
3864 	int q_idx;
3865 
3866 	if (!vsi->netdev)
3867 		return;
3868 
3869 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3870 		napi_enable(&vsi->q_vectors[q_idx]->napi);
3871 }
3872 
3873 /**
3874  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3875  * @vsi: the VSI being configured
3876  **/
3877 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3878 {
3879 	int q_idx;
3880 
3881 	if (!vsi->netdev)
3882 		return;
3883 
3884 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3885 		napi_disable(&vsi->q_vectors[q_idx]->napi);
3886 }
3887 
3888 /**
3889  * i40e_vsi_close - Shut down a VSI
3890  * @vsi: the vsi to be quelled
3891  **/
3892 static void i40e_vsi_close(struct i40e_vsi *vsi)
3893 {
3894 	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3895 		i40e_down(vsi);
3896 	i40e_vsi_free_irq(vsi);
3897 	i40e_vsi_free_tx_resources(vsi);
3898 	i40e_vsi_free_rx_resources(vsi);
3899 }
3900 
3901 /**
3902  * i40e_quiesce_vsi - Pause a given VSI
3903  * @vsi: the VSI being paused
3904  **/
3905 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3906 {
3907 	if (test_bit(__I40E_DOWN, &vsi->state))
3908 		return;
3909 
3910 	/* No need to disable FCoE VSI when Tx suspended */
3911 	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
3912 	    vsi->type == I40E_VSI_FCOE) {
3913 		dev_dbg(&vsi->back->pdev->dev,
3914 			"%s: VSI seid %d skipping FCoE VSI disable\n",
3915 			 __func__, vsi->seid);
3916 		return;
3917 	}
3918 
3919 	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3920 	if (vsi->netdev && netif_running(vsi->netdev)) {
3921 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3922 	} else {
3923 		i40e_vsi_close(vsi);
3924 	}
3925 }
3926 
3927 /**
3928  * i40e_unquiesce_vsi - Resume a given VSI
3929  * @vsi: the VSI being resumed
3930  **/
3931 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3932 {
3933 	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3934 		return;
3935 
3936 	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3937 	if (vsi->netdev && netif_running(vsi->netdev))
3938 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3939 	else
3940 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
3941 }
3942 
3943 /**
3944  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3945  * @pf: the PF
3946  **/
3947 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3948 {
3949 	int v;
3950 
3951 	for (v = 0; v < pf->num_alloc_vsi; v++) {
3952 		if (pf->vsi[v])
3953 			i40e_quiesce_vsi(pf->vsi[v]);
3954 	}
3955 }
3956 
3957 /**
3958  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3959  * @pf: the PF
3960  **/
3961 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3962 {
3963 	int v;
3964 
3965 	for (v = 0; v < pf->num_alloc_vsi; v++) {
3966 		if (pf->vsi[v])
3967 			i40e_unquiesce_vsi(pf->vsi[v]);
3968 	}
3969 }
3970 
3971 #ifdef CONFIG_I40E_DCB
3972 /**
3973  * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
3974  * @vsi: the VSI being configured
3975  *
3976  * This function waits for the given VSI's Tx queues to be disabled.
3977  **/
3978 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
3979 {
3980 	struct i40e_pf *pf = vsi->back;
3981 	int i, pf_q, ret;
3982 
3983 	pf_q = vsi->base_queue;
3984 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3985 		/* Check and wait for the disable status of the queue */
3986 		ret = i40e_pf_txq_wait(pf, pf_q, false);
3987 		if (ret) {
3988 			dev_info(&pf->pdev->dev,
3989 				 "%s: VSI seid %d Tx ring %d disable timeout\n",
3990 				 __func__, vsi->seid, pf_q);
3991 			return ret;
3992 		}
3993 	}
3994 
3995 	return 0;
3996 }
3997 
3998 /**
3999  * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4000  * @pf: the PF
4001  *
4002  * This function waits for the Tx queues to be in disabled state for all the
4003  * VSIs that are managed by this PF.
4004  **/
4005 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4006 {
4007 	int v, ret = 0;
4008 
4009 	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4010 		/* No need to wait for FCoE VSI queues */
4011 		if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4012 			ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4013 			if (ret)
4014 				break;
4015 		}
4016 	}
4017 
4018 	return ret;
4019 }
4020 
4021 #endif
4022 /**
4023  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4024  * @pf: pointer to pf
4025  *
4026  * Get TC map for ISCSI PF type that will include iSCSI TC
4027  * and LAN TC.
4028  **/
4029 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4030 {
4031 	struct i40e_dcb_app_priority_table app;
4032 	struct i40e_hw *hw = &pf->hw;
4033 	u8 enabled_tc = 1; /* TC0 is always enabled */
4034 	u8 tc, i;
4035 	/* Get the iSCSI APP TLV */
4036 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4037 
4038 	for (i = 0; i < dcbcfg->numapps; i++) {
4039 		app = dcbcfg->app[i];
4040 		if (app.selector == I40E_APP_SEL_TCPIP &&
4041 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
4042 			tc = dcbcfg->etscfg.prioritytable[app.priority];
4043 			enabled_tc |= (1 << tc);
4044 			break;
4045 		}
4046 	}
4047 
4048 	return enabled_tc;
4049 }
4050 
4051 /**
4052  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4053  * @dcbcfg: the corresponding DCBx configuration structure
4054  *
4055  * Return the number of TCs from given DCBx configuration
4056  **/
4057 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4058 {
4059 	u8 num_tc = 0;
4060 	int i;
4061 
4062 	/* Scan the ETS Config Priority Table to find
4063 	 * traffic class enabled for a given priority
4064 	 * and use the traffic class index to get the
4065 	 * number of traffic classes enabled
4066 	 */
4067 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4068 		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4069 			num_tc = dcbcfg->etscfg.prioritytable[i];
4070 	}
4071 
4072 	/* Traffic class index starts from zero so
4073 	 * increment to return the actual count
4074 	 */
4075 	return num_tc + 1;
4076 }
4077 
4078 /**
4079  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4080  * @dcbcfg: the corresponding DCBx configuration structure
4081  *
4082  * Query the current DCB configuration and return the number of
4083  * traffic classes enabled from the given DCBX config
4084  **/
4085 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4086 {
4087 	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4088 	u8 enabled_tc = 1;
4089 	u8 i;
4090 
4091 	for (i = 0; i < num_tc; i++)
4092 		enabled_tc |= 1 << i;
4093 
4094 	return enabled_tc;
4095 }
4096 
4097 /**
4098  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4099  * @pf: PF being queried
4100  *
4101  * Return number of traffic classes enabled for the given PF
4102  **/
4103 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4104 {
4105 	struct i40e_hw *hw = &pf->hw;
4106 	u8 i, enabled_tc;
4107 	u8 num_tc = 0;
4108 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4109 
4110 	/* If DCB is not enabled then always in single TC */
4111 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4112 		return 1;
4113 
4114 	/* SFP mode will be enabled for all TCs on port */
4115 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4116 		return i40e_dcb_get_num_tc(dcbcfg);
4117 
4118 	/* MFP mode return count of enabled TCs for this PF */
4119 	if (pf->hw.func_caps.iscsi)
4120 		enabled_tc =  i40e_get_iscsi_tc_map(pf);
4121 	else
4122 		enabled_tc = pf->hw.func_caps.enabled_tcmap;
4123 
4124 	/* At least have TC0 */
4125 	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4126 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4127 		if (enabled_tc & (1 << i))
4128 			num_tc++;
4129 	}
4130 	return num_tc;
4131 }
4132 
4133 /**
4134  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4135  * @pf: PF being queried
4136  *
4137  * Return a bitmap for first enabled traffic class for this PF.
4138  **/
4139 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4140 {
4141 	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4142 	u8 i = 0;
4143 
4144 	if (!enabled_tc)
4145 		return 0x1; /* TC0 */
4146 
4147 	/* Find the first enabled TC */
4148 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4149 		if (enabled_tc & (1 << i))
4150 			break;
4151 	}
4152 
4153 	return 1 << i;
4154 }
4155 
4156 /**
4157  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4158  * @pf: PF being queried
4159  *
4160  * Return a bitmap for enabled traffic classes for this PF.
4161  **/
4162 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4163 {
4164 	/* If DCB is not enabled for this PF then just return default TC */
4165 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4166 		return i40e_pf_get_default_tc(pf);
4167 
4168 	/* SFP mode we want PF to be enabled for all TCs */
4169 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4170 		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4171 
4172 	/* MPF enabled and iSCSI PF type */
4173 	if (pf->hw.func_caps.iscsi)
4174 		return i40e_get_iscsi_tc_map(pf);
4175 	else
4176 		return pf->hw.func_caps.enabled_tcmap;
4177 }
4178 
4179 /**
4180  * i40e_vsi_get_bw_info - Query VSI BW Information
4181  * @vsi: the VSI being queried
4182  *
4183  * Returns 0 on success, negative value on failure
4184  **/
4185 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4186 {
4187 	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4188 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4189 	struct i40e_pf *pf = vsi->back;
4190 	struct i40e_hw *hw = &pf->hw;
4191 	i40e_status aq_ret;
4192 	u32 tc_bw_max;
4193 	int i;
4194 
4195 	/* Get the VSI level BW configuration */
4196 	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4197 	if (aq_ret) {
4198 		dev_info(&pf->pdev->dev,
4199 			 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
4200 			 aq_ret, pf->hw.aq.asq_last_status);
4201 		return -EINVAL;
4202 	}
4203 
4204 	/* Get the VSI level BW configuration per TC */
4205 	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4206 						  NULL);
4207 	if (aq_ret) {
4208 		dev_info(&pf->pdev->dev,
4209 			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
4210 			 aq_ret, pf->hw.aq.asq_last_status);
4211 		return -EINVAL;
4212 	}
4213 
4214 	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4215 		dev_info(&pf->pdev->dev,
4216 			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4217 			 bw_config.tc_valid_bits,
4218 			 bw_ets_config.tc_valid_bits);
4219 		/* Still continuing */
4220 	}
4221 
4222 	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4223 	vsi->bw_max_quanta = bw_config.max_bw;
4224 	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4225 		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4226 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4227 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4228 		vsi->bw_ets_limit_credits[i] =
4229 					le16_to_cpu(bw_ets_config.credits[i]);
4230 		/* 3 bits out of 4 for each TC */
4231 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4232 	}
4233 
4234 	return 0;
4235 }
4236 
4237 /**
4238  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4239  * @vsi: the VSI being configured
4240  * @enabled_tc: TC bitmap
4241  * @bw_credits: BW shared credits per TC
4242  *
4243  * Returns 0 on success, negative value on failure
4244  **/
4245 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4246 				       u8 *bw_share)
4247 {
4248 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4249 	i40e_status aq_ret;
4250 	int i;
4251 
4252 	bw_data.tc_valid_bits = enabled_tc;
4253 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4254 		bw_data.tc_bw_credits[i] = bw_share[i];
4255 
4256 	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4257 					  NULL);
4258 	if (aq_ret) {
4259 		dev_info(&vsi->back->pdev->dev,
4260 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
4261 			 vsi->back->hw.aq.asq_last_status);
4262 		return -EINVAL;
4263 	}
4264 
4265 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4266 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4267 
4268 	return 0;
4269 }
4270 
4271 /**
4272  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4273  * @vsi: the VSI being configured
4274  * @enabled_tc: TC map to be enabled
4275  *
4276  **/
4277 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4278 {
4279 	struct net_device *netdev = vsi->netdev;
4280 	struct i40e_pf *pf = vsi->back;
4281 	struct i40e_hw *hw = &pf->hw;
4282 	u8 netdev_tc = 0;
4283 	int i;
4284 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4285 
4286 	if (!netdev)
4287 		return;
4288 
4289 	if (!enabled_tc) {
4290 		netdev_reset_tc(netdev);
4291 		return;
4292 	}
4293 
4294 	/* Set up actual enabled TCs on the VSI */
4295 	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4296 		return;
4297 
4298 	/* set per TC queues for the VSI */
4299 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4300 		/* Only set TC queues for enabled tcs
4301 		 *
4302 		 * e.g. For a VSI that has TC0 and TC3 enabled the
4303 		 * enabled_tc bitmap would be 0x00001001; the driver
4304 		 * will set the numtc for netdev as 2 that will be
4305 		 * referenced by the netdev layer as TC 0 and 1.
4306 		 */
4307 		if (vsi->tc_config.enabled_tc & (1 << i))
4308 			netdev_set_tc_queue(netdev,
4309 					vsi->tc_config.tc_info[i].netdev_tc,
4310 					vsi->tc_config.tc_info[i].qcount,
4311 					vsi->tc_config.tc_info[i].qoffset);
4312 	}
4313 
4314 	/* Assign UP2TC map for the VSI */
4315 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4316 		/* Get the actual TC# for the UP */
4317 		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4318 		/* Get the mapped netdev TC# for the UP */
4319 		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
4320 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
4321 	}
4322 }
4323 
4324 /**
4325  * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
4326  * @vsi: the VSI being configured
4327  * @ctxt: the ctxt buffer returned from AQ VSI update param command
4328  **/
4329 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4330 				      struct i40e_vsi_context *ctxt)
4331 {
4332 	/* copy just the sections touched not the entire info
4333 	 * since not all sections are valid as returned by
4334 	 * update vsi params
4335 	 */
4336 	vsi->info.mapping_flags = ctxt->info.mapping_flags;
4337 	memcpy(&vsi->info.queue_mapping,
4338 	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4339 	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4340 	       sizeof(vsi->info.tc_mapping));
4341 }
4342 
4343 /**
4344  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4345  * @vsi: VSI to be configured
4346  * @enabled_tc: TC bitmap
4347  *
4348  * This configures a particular VSI for TCs that are mapped to the
4349  * given TC bitmap. It uses default bandwidth share for TCs across
4350  * VSIs to configure TC for a particular VSI.
4351  *
4352  * NOTE:
4353  * It is expected that the VSI queues have been quisced before calling
4354  * this function.
4355  **/
4356 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4357 {
4358 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4359 	struct i40e_vsi_context ctxt;
4360 	int ret = 0;
4361 	int i;
4362 
4363 	/* Check if enabled_tc is same as existing or new TCs */
4364 	if (vsi->tc_config.enabled_tc == enabled_tc)
4365 		return ret;
4366 
4367 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
4368 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4369 		if (enabled_tc & (1 << i))
4370 			bw_share[i] = 1;
4371 	}
4372 
4373 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4374 	if (ret) {
4375 		dev_info(&vsi->back->pdev->dev,
4376 			 "Failed configuring TC map %d for VSI %d\n",
4377 			 enabled_tc, vsi->seid);
4378 		goto out;
4379 	}
4380 
4381 	/* Update Queue Pairs Mapping for currently enabled UPs */
4382 	ctxt.seid = vsi->seid;
4383 	ctxt.pf_num = vsi->back->hw.pf_id;
4384 	ctxt.vf_num = 0;
4385 	ctxt.uplink_seid = vsi->uplink_seid;
4386 	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4387 	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4388 
4389 	/* Update the VSI after updating the VSI queue-mapping information */
4390 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4391 	if (ret) {
4392 		dev_info(&vsi->back->pdev->dev,
4393 			 "update vsi failed, aq_err=%d\n",
4394 			 vsi->back->hw.aq.asq_last_status);
4395 		goto out;
4396 	}
4397 	/* update the local VSI info with updated queue map */
4398 	i40e_vsi_update_queue_map(vsi, &ctxt);
4399 	vsi->info.valid_sections = 0;
4400 
4401 	/* Update current VSI BW information */
4402 	ret = i40e_vsi_get_bw_info(vsi);
4403 	if (ret) {
4404 		dev_info(&vsi->back->pdev->dev,
4405 			 "Failed updating vsi bw info, aq_err=%d\n",
4406 			 vsi->back->hw.aq.asq_last_status);
4407 		goto out;
4408 	}
4409 
4410 	/* Update the netdev TC setup */
4411 	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4412 out:
4413 	return ret;
4414 }
4415 
4416 /**
4417  * i40e_veb_config_tc - Configure TCs for given VEB
4418  * @veb: given VEB
4419  * @enabled_tc: TC bitmap
4420  *
4421  * Configures given TC bitmap for VEB (switching) element
4422  **/
4423 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4424 {
4425 	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4426 	struct i40e_pf *pf = veb->pf;
4427 	int ret = 0;
4428 	int i;
4429 
4430 	/* No TCs or already enabled TCs just return */
4431 	if (!enabled_tc || veb->enabled_tc == enabled_tc)
4432 		return ret;
4433 
4434 	bw_data.tc_valid_bits = enabled_tc;
4435 	/* bw_data.absolute_credits is not set (relative) */
4436 
4437 	/* Enable ETS TCs with equal BW Share for now */
4438 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4439 		if (enabled_tc & (1 << i))
4440 			bw_data.tc_bw_share_credits[i] = 1;
4441 	}
4442 
4443 	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4444 						   &bw_data, NULL);
4445 	if (ret) {
4446 		dev_info(&pf->pdev->dev,
4447 			 "veb bw config failed, aq_err=%d\n",
4448 			 pf->hw.aq.asq_last_status);
4449 		goto out;
4450 	}
4451 
4452 	/* Update the BW information */
4453 	ret = i40e_veb_get_bw_info(veb);
4454 	if (ret) {
4455 		dev_info(&pf->pdev->dev,
4456 			 "Failed getting veb bw config, aq_err=%d\n",
4457 			 pf->hw.aq.asq_last_status);
4458 	}
4459 
4460 out:
4461 	return ret;
4462 }
4463 
4464 #ifdef CONFIG_I40E_DCB
4465 /**
4466  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4467  * @pf: PF struct
4468  *
4469  * Reconfigure VEB/VSIs on a given PF; it is assumed that
4470  * the caller would've quiesce all the VSIs before calling
4471  * this function
4472  **/
4473 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4474 {
4475 	u8 tc_map = 0;
4476 	int ret;
4477 	u8 v;
4478 
4479 	/* Enable the TCs available on PF to all VEBs */
4480 	tc_map = i40e_pf_get_tc_map(pf);
4481 	for (v = 0; v < I40E_MAX_VEB; v++) {
4482 		if (!pf->veb[v])
4483 			continue;
4484 		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4485 		if (ret) {
4486 			dev_info(&pf->pdev->dev,
4487 				 "Failed configuring TC for VEB seid=%d\n",
4488 				 pf->veb[v]->seid);
4489 			/* Will try to configure as many components */
4490 		}
4491 	}
4492 
4493 	/* Update each VSI */
4494 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4495 		if (!pf->vsi[v])
4496 			continue;
4497 
4498 		/* - Enable all TCs for the LAN VSI
4499 #ifdef I40E_FCOE
4500 		 * - For FCoE VSI only enable the TC configured
4501 		 *   as per the APP TLV
4502 #endif
4503 		 * - For all others keep them at TC0 for now
4504 		 */
4505 		if (v == pf->lan_vsi)
4506 			tc_map = i40e_pf_get_tc_map(pf);
4507 		else
4508 			tc_map = i40e_pf_get_default_tc(pf);
4509 #ifdef I40E_FCOE
4510 		if (pf->vsi[v]->type == I40E_VSI_FCOE)
4511 			tc_map = i40e_get_fcoe_tc_map(pf);
4512 #endif /* #ifdef I40E_FCOE */
4513 
4514 		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4515 		if (ret) {
4516 			dev_info(&pf->pdev->dev,
4517 				 "Failed configuring TC for VSI seid=%d\n",
4518 				 pf->vsi[v]->seid);
4519 			/* Will try to configure as many components */
4520 		} else {
4521 			/* Re-configure VSI vectors based on updated TC map */
4522 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4523 			if (pf->vsi[v]->netdev)
4524 				i40e_dcbnl_set_all(pf->vsi[v]);
4525 		}
4526 	}
4527 }
4528 
4529 /**
4530  * i40e_resume_port_tx - Resume port Tx
4531  * @pf: PF struct
4532  *
4533  * Resume a port's Tx and issue a PF reset in case of failure to
4534  * resume.
4535  **/
4536 static int i40e_resume_port_tx(struct i40e_pf *pf)
4537 {
4538 	struct i40e_hw *hw = &pf->hw;
4539 	int ret;
4540 
4541 	ret = i40e_aq_resume_port_tx(hw, NULL);
4542 	if (ret) {
4543 		dev_info(&pf->pdev->dev,
4544 			 "AQ command Resume Port Tx failed = %d\n",
4545 			  pf->hw.aq.asq_last_status);
4546 		/* Schedule PF reset to recover */
4547 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4548 		i40e_service_event_schedule(pf);
4549 	}
4550 
4551 	return ret;
4552 }
4553 
4554 /**
4555  * i40e_init_pf_dcb - Initialize DCB configuration
4556  * @pf: PF being configured
4557  *
4558  * Query the current DCB configuration and cache it
4559  * in the hardware structure
4560  **/
4561 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4562 {
4563 	struct i40e_hw *hw = &pf->hw;
4564 	int err = 0;
4565 
4566 	/* Get the initial DCB configuration */
4567 	err = i40e_init_dcb(hw);
4568 	if (!err) {
4569 		/* Device/Function is not DCBX capable */
4570 		if ((!hw->func_caps.dcb) ||
4571 		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4572 			dev_info(&pf->pdev->dev,
4573 				 "DCBX offload is not supported or is disabled for this PF.\n");
4574 
4575 			if (pf->flags & I40E_FLAG_MFP_ENABLED)
4576 				goto out;
4577 
4578 		} else {
4579 			/* When status is not DISABLED then DCBX in FW */
4580 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4581 				       DCB_CAP_DCBX_VER_IEEE;
4582 
4583 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
4584 			/* Enable DCB tagging only when more than one TC */
4585 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4586 				pf->flags |= I40E_FLAG_DCB_ENABLED;
4587 			dev_dbg(&pf->pdev->dev,
4588 				"DCBX offload is supported for this PF.\n");
4589 		}
4590 	} else {
4591 		dev_info(&pf->pdev->dev,
4592 			 "AQ Querying DCB configuration failed: aq_err %d\n",
4593 			 pf->hw.aq.asq_last_status);
4594 	}
4595 
4596 out:
4597 	return err;
4598 }
4599 #endif /* CONFIG_I40E_DCB */
4600 #define SPEED_SIZE 14
4601 #define FC_SIZE 8
4602 /**
4603  * i40e_print_link_message - print link up or down
4604  * @vsi: the VSI for which link needs a message
4605  */
4606 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4607 {
4608 	char speed[SPEED_SIZE] = "Unknown";
4609 	char fc[FC_SIZE] = "RX/TX";
4610 
4611 	if (!isup) {
4612 		netdev_info(vsi->netdev, "NIC Link is Down\n");
4613 		return;
4614 	}
4615 
4616 	/* Warn user if link speed on NPAR enabled partition is not at
4617 	 * least 10GB
4618 	 */
4619 	if (vsi->back->hw.func_caps.npar_enable &&
4620 	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4621 	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4622 		netdev_warn(vsi->netdev,
4623 			    "The partition detected link speed that is less than 10Gbps\n");
4624 
4625 	switch (vsi->back->hw.phy.link_info.link_speed) {
4626 	case I40E_LINK_SPEED_40GB:
4627 		strlcpy(speed, "40 Gbps", SPEED_SIZE);
4628 		break;
4629 	case I40E_LINK_SPEED_10GB:
4630 		strlcpy(speed, "10 Gbps", SPEED_SIZE);
4631 		break;
4632 	case I40E_LINK_SPEED_1GB:
4633 		strlcpy(speed, "1000 Mbps", SPEED_SIZE);
4634 		break;
4635 	case I40E_LINK_SPEED_100MB:
4636 		strncpy(speed, "100 Mbps", SPEED_SIZE);
4637 		break;
4638 	default:
4639 		break;
4640 	}
4641 
4642 	switch (vsi->back->hw.fc.current_mode) {
4643 	case I40E_FC_FULL:
4644 		strlcpy(fc, "RX/TX", FC_SIZE);
4645 		break;
4646 	case I40E_FC_TX_PAUSE:
4647 		strlcpy(fc, "TX", FC_SIZE);
4648 		break;
4649 	case I40E_FC_RX_PAUSE:
4650 		strlcpy(fc, "RX", FC_SIZE);
4651 		break;
4652 	default:
4653 		strlcpy(fc, "None", FC_SIZE);
4654 		break;
4655 	}
4656 
4657 	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4658 		    speed, fc);
4659 }
4660 
4661 /**
4662  * i40e_up_complete - Finish the last steps of bringing up a connection
4663  * @vsi: the VSI being configured
4664  **/
4665 static int i40e_up_complete(struct i40e_vsi *vsi)
4666 {
4667 	struct i40e_pf *pf = vsi->back;
4668 	int err;
4669 
4670 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4671 		i40e_vsi_configure_msix(vsi);
4672 	else
4673 		i40e_configure_msi_and_legacy(vsi);
4674 
4675 	/* start rings */
4676 	err = i40e_vsi_control_rings(vsi, true);
4677 	if (err)
4678 		return err;
4679 
4680 	clear_bit(__I40E_DOWN, &vsi->state);
4681 	i40e_napi_enable_all(vsi);
4682 	i40e_vsi_enable_irq(vsi);
4683 
4684 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4685 	    (vsi->netdev)) {
4686 		i40e_print_link_message(vsi, true);
4687 		netif_tx_start_all_queues(vsi->netdev);
4688 		netif_carrier_on(vsi->netdev);
4689 	} else if (vsi->netdev) {
4690 		i40e_print_link_message(vsi, false);
4691 		/* need to check for qualified module here*/
4692 		if ((pf->hw.phy.link_info.link_info &
4693 			I40E_AQ_MEDIA_AVAILABLE) &&
4694 		    (!(pf->hw.phy.link_info.an_info &
4695 			I40E_AQ_QUALIFIED_MODULE)))
4696 			netdev_err(vsi->netdev,
4697 				   "the driver failed to link because an unqualified module was detected.");
4698 	}
4699 
4700 	/* replay FDIR SB filters */
4701 	if (vsi->type == I40E_VSI_FDIR) {
4702 		/* reset fd counters */
4703 		pf->fd_add_err = pf->fd_atr_cnt = 0;
4704 		if (pf->fd_tcp_rule > 0) {
4705 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4706 			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4707 			pf->fd_tcp_rule = 0;
4708 		}
4709 		i40e_fdir_filter_restore(vsi);
4710 	}
4711 	i40e_service_event_schedule(pf);
4712 
4713 	return 0;
4714 }
4715 
4716 /**
4717  * i40e_vsi_reinit_locked - Reset the VSI
4718  * @vsi: the VSI being configured
4719  *
4720  * Rebuild the ring structs after some configuration
4721  * has changed, e.g. MTU size.
4722  **/
4723 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4724 {
4725 	struct i40e_pf *pf = vsi->back;
4726 
4727 	WARN_ON(in_interrupt());
4728 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4729 		usleep_range(1000, 2000);
4730 	i40e_down(vsi);
4731 
4732 	/* Give a VF some time to respond to the reset.  The
4733 	 * two second wait is based upon the watchdog cycle in
4734 	 * the VF driver.
4735 	 */
4736 	if (vsi->type == I40E_VSI_SRIOV)
4737 		msleep(2000);
4738 	i40e_up(vsi);
4739 	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4740 }
4741 
4742 /**
4743  * i40e_up - Bring the connection back up after being down
4744  * @vsi: the VSI being configured
4745  **/
4746 int i40e_up(struct i40e_vsi *vsi)
4747 {
4748 	int err;
4749 
4750 	err = i40e_vsi_configure(vsi);
4751 	if (!err)
4752 		err = i40e_up_complete(vsi);
4753 
4754 	return err;
4755 }
4756 
4757 /**
4758  * i40e_down - Shutdown the connection processing
4759  * @vsi: the VSI being stopped
4760  **/
4761 void i40e_down(struct i40e_vsi *vsi)
4762 {
4763 	int i;
4764 
4765 	/* It is assumed that the caller of this function
4766 	 * sets the vsi->state __I40E_DOWN bit.
4767 	 */
4768 	if (vsi->netdev) {
4769 		netif_carrier_off(vsi->netdev);
4770 		netif_tx_disable(vsi->netdev);
4771 	}
4772 	i40e_vsi_disable_irq(vsi);
4773 	i40e_vsi_control_rings(vsi, false);
4774 	i40e_napi_disable_all(vsi);
4775 
4776 	for (i = 0; i < vsi->num_queue_pairs; i++) {
4777 		i40e_clean_tx_ring(vsi->tx_rings[i]);
4778 		i40e_clean_rx_ring(vsi->rx_rings[i]);
4779 	}
4780 }
4781 
4782 /**
4783  * i40e_setup_tc - configure multiple traffic classes
4784  * @netdev: net device to configure
4785  * @tc: number of traffic classes to enable
4786  **/
4787 #ifdef I40E_FCOE
4788 int i40e_setup_tc(struct net_device *netdev, u8 tc)
4789 #else
4790 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4791 #endif
4792 {
4793 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4794 	struct i40e_vsi *vsi = np->vsi;
4795 	struct i40e_pf *pf = vsi->back;
4796 	u8 enabled_tc = 0;
4797 	int ret = -EINVAL;
4798 	int i;
4799 
4800 	/* Check if DCB enabled to continue */
4801 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4802 		netdev_info(netdev, "DCB is not enabled for adapter\n");
4803 		goto exit;
4804 	}
4805 
4806 	/* Check if MFP enabled */
4807 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4808 		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4809 		goto exit;
4810 	}
4811 
4812 	/* Check whether tc count is within enabled limit */
4813 	if (tc > i40e_pf_get_num_tc(pf)) {
4814 		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4815 		goto exit;
4816 	}
4817 
4818 	/* Generate TC map for number of tc requested */
4819 	for (i = 0; i < tc; i++)
4820 		enabled_tc |= (1 << i);
4821 
4822 	/* Requesting same TC configuration as already enabled */
4823 	if (enabled_tc == vsi->tc_config.enabled_tc)
4824 		return 0;
4825 
4826 	/* Quiesce VSI queues */
4827 	i40e_quiesce_vsi(vsi);
4828 
4829 	/* Configure VSI for enabled TCs */
4830 	ret = i40e_vsi_config_tc(vsi, enabled_tc);
4831 	if (ret) {
4832 		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4833 			    vsi->seid);
4834 		goto exit;
4835 	}
4836 
4837 	/* Unquiesce VSI */
4838 	i40e_unquiesce_vsi(vsi);
4839 
4840 exit:
4841 	return ret;
4842 }
4843 
4844 /**
4845  * i40e_open - Called when a network interface is made active
4846  * @netdev: network interface device structure
4847  *
4848  * The open entry point is called when a network interface is made
4849  * active by the system (IFF_UP).  At this point all resources needed
4850  * for transmit and receive operations are allocated, the interrupt
4851  * handler is registered with the OS, the netdev watchdog subtask is
4852  * enabled, and the stack is notified that the interface is ready.
4853  *
4854  * Returns 0 on success, negative value on failure
4855  **/
4856 #ifdef I40E_FCOE
4857 int i40e_open(struct net_device *netdev)
4858 #else
4859 static int i40e_open(struct net_device *netdev)
4860 #endif
4861 {
4862 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4863 	struct i40e_vsi *vsi = np->vsi;
4864 	struct i40e_pf *pf = vsi->back;
4865 	int err;
4866 
4867 	/* disallow open during test or if eeprom is broken */
4868 	if (test_bit(__I40E_TESTING, &pf->state) ||
4869 	    test_bit(__I40E_BAD_EEPROM, &pf->state))
4870 		return -EBUSY;
4871 
4872 	netif_carrier_off(netdev);
4873 
4874 	err = i40e_vsi_open(vsi);
4875 	if (err)
4876 		return err;
4877 
4878 	/* configure global TSO hardware offload settings */
4879 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4880 						       TCP_FLAG_FIN) >> 16);
4881 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4882 						       TCP_FLAG_FIN |
4883 						       TCP_FLAG_CWR) >> 16);
4884 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4885 
4886 #ifdef CONFIG_I40E_VXLAN
4887 	vxlan_get_rx_port(netdev);
4888 #endif
4889 
4890 	return 0;
4891 }
4892 
4893 /**
4894  * i40e_vsi_open -
4895  * @vsi: the VSI to open
4896  *
4897  * Finish initialization of the VSI.
4898  *
4899  * Returns 0 on success, negative value on failure
4900  **/
4901 int i40e_vsi_open(struct i40e_vsi *vsi)
4902 {
4903 	struct i40e_pf *pf = vsi->back;
4904 	char int_name[I40E_INT_NAME_STR_LEN];
4905 	int err;
4906 
4907 	/* allocate descriptors */
4908 	err = i40e_vsi_setup_tx_resources(vsi);
4909 	if (err)
4910 		goto err_setup_tx;
4911 	err = i40e_vsi_setup_rx_resources(vsi);
4912 	if (err)
4913 		goto err_setup_rx;
4914 
4915 	err = i40e_vsi_configure(vsi);
4916 	if (err)
4917 		goto err_setup_rx;
4918 
4919 	if (vsi->netdev) {
4920 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4921 			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4922 		err = i40e_vsi_request_irq(vsi, int_name);
4923 		if (err)
4924 			goto err_setup_rx;
4925 
4926 		/* Notify the stack of the actual queue counts. */
4927 		err = netif_set_real_num_tx_queues(vsi->netdev,
4928 						   vsi->num_queue_pairs);
4929 		if (err)
4930 			goto err_set_queues;
4931 
4932 		err = netif_set_real_num_rx_queues(vsi->netdev,
4933 						   vsi->num_queue_pairs);
4934 		if (err)
4935 			goto err_set_queues;
4936 
4937 	} else if (vsi->type == I40E_VSI_FDIR) {
4938 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
4939 			 dev_driver_string(&pf->pdev->dev),
4940 			 dev_name(&pf->pdev->dev));
4941 		err = i40e_vsi_request_irq(vsi, int_name);
4942 
4943 	} else {
4944 		err = -EINVAL;
4945 		goto err_setup_rx;
4946 	}
4947 
4948 	err = i40e_up_complete(vsi);
4949 	if (err)
4950 		goto err_up_complete;
4951 
4952 	return 0;
4953 
4954 err_up_complete:
4955 	i40e_down(vsi);
4956 err_set_queues:
4957 	i40e_vsi_free_irq(vsi);
4958 err_setup_rx:
4959 	i40e_vsi_free_rx_resources(vsi);
4960 err_setup_tx:
4961 	i40e_vsi_free_tx_resources(vsi);
4962 	if (vsi == pf->vsi[pf->lan_vsi])
4963 		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4964 
4965 	return err;
4966 }
4967 
4968 /**
4969  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
4970  * @pf: Pointer to pf
4971  *
4972  * This function destroys the hlist where all the Flow Director
4973  * filters were saved.
4974  **/
4975 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4976 {
4977 	struct i40e_fdir_filter *filter;
4978 	struct hlist_node *node2;
4979 
4980 	hlist_for_each_entry_safe(filter, node2,
4981 				  &pf->fdir_filter_list, fdir_node) {
4982 		hlist_del(&filter->fdir_node);
4983 		kfree(filter);
4984 	}
4985 	pf->fdir_pf_active_filters = 0;
4986 }
4987 
4988 /**
4989  * i40e_close - Disables a network interface
4990  * @netdev: network interface device structure
4991  *
4992  * The close entry point is called when an interface is de-activated
4993  * by the OS.  The hardware is still under the driver's control, but
4994  * this netdev interface is disabled.
4995  *
4996  * Returns 0, this is not allowed to fail
4997  **/
4998 #ifdef I40E_FCOE
4999 int i40e_close(struct net_device *netdev)
5000 #else
5001 static int i40e_close(struct net_device *netdev)
5002 #endif
5003 {
5004 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5005 	struct i40e_vsi *vsi = np->vsi;
5006 
5007 	i40e_vsi_close(vsi);
5008 
5009 	return 0;
5010 }
5011 
5012 /**
5013  * i40e_do_reset - Start a PF or Core Reset sequence
5014  * @pf: board private structure
5015  * @reset_flags: which reset is requested
5016  *
5017  * The essential difference in resets is that the PF Reset
5018  * doesn't clear the packet buffers, doesn't reset the PE
5019  * firmware, and doesn't bother the other PFs on the chip.
5020  **/
5021 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5022 {
5023 	u32 val;
5024 
5025 	WARN_ON(in_interrupt());
5026 
5027 	if (i40e_check_asq_alive(&pf->hw))
5028 		i40e_vc_notify_reset(pf);
5029 
5030 	/* do the biggest reset indicated */
5031 	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
5032 
5033 		/* Request a Global Reset
5034 		 *
5035 		 * This will start the chip's countdown to the actual full
5036 		 * chip reset event, and a warning interrupt to be sent
5037 		 * to all PFs, including the requestor.  Our handler
5038 		 * for the warning interrupt will deal with the shutdown
5039 		 * and recovery of the switch setup.
5040 		 */
5041 		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5042 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5043 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5044 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5045 
5046 	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
5047 
5048 		/* Request a Core Reset
5049 		 *
5050 		 * Same as Global Reset, except does *not* include the MAC/PHY
5051 		 */
5052 		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5053 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5054 		val |= I40E_GLGEN_RTRIG_CORER_MASK;
5055 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5056 		i40e_flush(&pf->hw);
5057 
5058 	} else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
5059 
5060 		/* Request a Firmware Reset
5061 		 *
5062 		 * Same as Global reset, plus restarting the
5063 		 * embedded firmware engine.
5064 		 */
5065 		/* enable EMP Reset */
5066 		val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
5067 		val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
5068 		wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
5069 
5070 		/* force the reset */
5071 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5072 		val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
5073 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5074 		i40e_flush(&pf->hw);
5075 
5076 	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
5077 
5078 		/* Request a PF Reset
5079 		 *
5080 		 * Resets only the PF-specific registers
5081 		 *
5082 		 * This goes directly to the tear-down and rebuild of
5083 		 * the switch, since we need to do all the recovery as
5084 		 * for the Core Reset.
5085 		 */
5086 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
5087 		i40e_handle_reset_warning(pf);
5088 
5089 	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
5090 		int v;
5091 
5092 		/* Find the VSI(s) that requested a re-init */
5093 		dev_info(&pf->pdev->dev,
5094 			 "VSI reinit requested\n");
5095 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5096 			struct i40e_vsi *vsi = pf->vsi[v];
5097 			if (vsi != NULL &&
5098 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5099 				i40e_vsi_reinit_locked(pf->vsi[v]);
5100 				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5101 			}
5102 		}
5103 
5104 		/* no further action needed, so return now */
5105 		return;
5106 	} else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
5107 		int v;
5108 
5109 		/* Find the VSI(s) that needs to be brought down */
5110 		dev_info(&pf->pdev->dev, "VSI down requested\n");
5111 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5112 			struct i40e_vsi *vsi = pf->vsi[v];
5113 			if (vsi != NULL &&
5114 			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5115 				set_bit(__I40E_DOWN, &vsi->state);
5116 				i40e_down(vsi);
5117 				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5118 			}
5119 		}
5120 
5121 		/* no further action needed, so return now */
5122 		return;
5123 	} else {
5124 		dev_info(&pf->pdev->dev,
5125 			 "bad reset request 0x%08x\n", reset_flags);
5126 		return;
5127 	}
5128 }
5129 
5130 #ifdef CONFIG_I40E_DCB
5131 /**
5132  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5133  * @pf: board private structure
5134  * @old_cfg: current DCB config
5135  * @new_cfg: new DCB config
5136  **/
5137 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5138 			    struct i40e_dcbx_config *old_cfg,
5139 			    struct i40e_dcbx_config *new_cfg)
5140 {
5141 	bool need_reconfig = false;
5142 
5143 	/* Check if ETS configuration has changed */
5144 	if (memcmp(&new_cfg->etscfg,
5145 		   &old_cfg->etscfg,
5146 		   sizeof(new_cfg->etscfg))) {
5147 		/* If Priority Table has changed reconfig is needed */
5148 		if (memcmp(&new_cfg->etscfg.prioritytable,
5149 			   &old_cfg->etscfg.prioritytable,
5150 			   sizeof(new_cfg->etscfg.prioritytable))) {
5151 			need_reconfig = true;
5152 			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5153 		}
5154 
5155 		if (memcmp(&new_cfg->etscfg.tcbwtable,
5156 			   &old_cfg->etscfg.tcbwtable,
5157 			   sizeof(new_cfg->etscfg.tcbwtable)))
5158 			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5159 
5160 		if (memcmp(&new_cfg->etscfg.tsatable,
5161 			   &old_cfg->etscfg.tsatable,
5162 			   sizeof(new_cfg->etscfg.tsatable)))
5163 			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5164 	}
5165 
5166 	/* Check if PFC configuration has changed */
5167 	if (memcmp(&new_cfg->pfc,
5168 		   &old_cfg->pfc,
5169 		   sizeof(new_cfg->pfc))) {
5170 		need_reconfig = true;
5171 		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5172 	}
5173 
5174 	/* Check if APP Table has changed */
5175 	if (memcmp(&new_cfg->app,
5176 		   &old_cfg->app,
5177 		   sizeof(new_cfg->app))) {
5178 		need_reconfig = true;
5179 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5180 	}
5181 
5182 	dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5183 		need_reconfig);
5184 	return need_reconfig;
5185 }
5186 
5187 /**
5188  * i40e_handle_lldp_event - Handle LLDP Change MIB event
5189  * @pf: board private structure
5190  * @e: event info posted on ARQ
5191  **/
5192 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5193 				  struct i40e_arq_event_info *e)
5194 {
5195 	struct i40e_aqc_lldp_get_mib *mib =
5196 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5197 	struct i40e_hw *hw = &pf->hw;
5198 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
5199 	struct i40e_dcbx_config tmp_dcbx_cfg;
5200 	bool need_reconfig = false;
5201 	int ret = 0;
5202 	u8 type;
5203 
5204 	/* Not DCB capable or capability disabled */
5205 	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5206 		return ret;
5207 
5208 	/* Ignore if event is not for Nearest Bridge */
5209 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5210 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5211 	dev_dbg(&pf->pdev->dev,
5212 		"%s: LLDP event mib bridge type 0x%x\n", __func__, type);
5213 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5214 		return ret;
5215 
5216 	/* Check MIB Type and return if event for Remote MIB update */
5217 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5218 	dev_dbg(&pf->pdev->dev,
5219 		"%s: LLDP event mib type %s\n", __func__,
5220 		type ? "remote" : "local");
5221 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5222 		/* Update the remote cached instance and return */
5223 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5224 				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5225 				&hw->remote_dcbx_config);
5226 		goto exit;
5227 	}
5228 
5229 	memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
5230 	/* Store the old configuration */
5231 	tmp_dcbx_cfg = *dcbx_cfg;
5232 
5233 	/* Get updated DCBX data from firmware */
5234 	ret = i40e_get_dcb_config(&pf->hw);
5235 	if (ret) {
5236 		dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
5237 		goto exit;
5238 	}
5239 
5240 	/* No change detected in DCBX configs */
5241 	if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
5242 		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5243 		goto exit;
5244 	}
5245 
5246 	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
5247 
5248 	i40e_dcbnl_flush_apps(pf, dcbx_cfg);
5249 
5250 	if (!need_reconfig)
5251 		goto exit;
5252 
5253 	/* Enable DCB tagging only when more than one TC */
5254 	if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
5255 		pf->flags |= I40E_FLAG_DCB_ENABLED;
5256 	else
5257 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5258 
5259 	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5260 	/* Reconfiguration needed quiesce all VSIs */
5261 	i40e_pf_quiesce_all_vsi(pf);
5262 
5263 	/* Changes in configuration update VEB/VSI */
5264 	i40e_dcb_reconfigure(pf);
5265 
5266 	ret = i40e_resume_port_tx(pf);
5267 
5268 	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5269 	/* In case of error no point in resuming VSIs */
5270 	if (ret)
5271 		goto exit;
5272 
5273 	/* Wait for the PF's Tx queues to be disabled */
5274 	ret = i40e_pf_wait_txq_disabled(pf);
5275 	if (ret) {
5276 		/* Schedule PF reset to recover */
5277 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5278 		i40e_service_event_schedule(pf);
5279 	} else {
5280 		i40e_pf_unquiesce_all_vsi(pf);
5281 	}
5282 
5283 exit:
5284 	return ret;
5285 }
5286 #endif /* CONFIG_I40E_DCB */
5287 
5288 /**
5289  * i40e_do_reset_safe - Protected reset path for userland calls.
5290  * @pf: board private structure
5291  * @reset_flags: which reset is requested
5292  *
5293  **/
5294 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5295 {
5296 	rtnl_lock();
5297 	i40e_do_reset(pf, reset_flags);
5298 	rtnl_unlock();
5299 }
5300 
5301 /**
5302  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5303  * @pf: board private structure
5304  * @e: event info posted on ARQ
5305  *
5306  * Handler for LAN Queue Overflow Event generated by the firmware for PF
5307  * and VF queues
5308  **/
5309 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5310 					   struct i40e_arq_event_info *e)
5311 {
5312 	struct i40e_aqc_lan_overflow *data =
5313 		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5314 	u32 queue = le32_to_cpu(data->prtdcb_rupto);
5315 	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5316 	struct i40e_hw *hw = &pf->hw;
5317 	struct i40e_vf *vf;
5318 	u16 vf_id;
5319 
5320 	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5321 		queue, qtx_ctl);
5322 
5323 	/* Queue belongs to VF, find the VF and issue VF reset */
5324 	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5325 	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5326 		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5327 			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5328 		vf_id -= hw->func_caps.vf_base_id;
5329 		vf = &pf->vf[vf_id];
5330 		i40e_vc_notify_vf_reset(vf);
5331 		/* Allow VF to process pending reset notification */
5332 		msleep(20);
5333 		i40e_reset_vf(vf, false);
5334 	}
5335 }
5336 
5337 /**
5338  * i40e_service_event_complete - Finish up the service event
5339  * @pf: board private structure
5340  **/
5341 static void i40e_service_event_complete(struct i40e_pf *pf)
5342 {
5343 	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5344 
5345 	/* flush memory to make sure state is correct before next watchog */
5346 	smp_mb__before_atomic();
5347 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5348 }
5349 
5350 /**
5351  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5352  * @pf: board private structure
5353  **/
5354 int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5355 {
5356 	int val, fcnt_prog;
5357 
5358 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5359 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5360 	return fcnt_prog;
5361 }
5362 
5363 /**
5364  * i40e_get_current_fd_count - Get the count of total FD filters programmed
5365  * @pf: board private structure
5366  **/
5367 int i40e_get_current_fd_count(struct i40e_pf *pf)
5368 {
5369 	int val, fcnt_prog;
5370 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5371 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5372 		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5373 		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5374 	return fcnt_prog;
5375 }
5376 
5377 /**
5378  * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
5379  * @pf: board private structure
5380  **/
5381 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5382 {
5383 	u32 fcnt_prog, fcnt_avail;
5384 
5385 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5386 		return;
5387 
5388 	/* Check if, FD SB or ATR was auto disabled and if there is enough room
5389 	 * to re-enable
5390 	 */
5391 	fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
5392 	fcnt_avail = pf->fdir_pf_filter_count;
5393 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5394 	    (pf->fd_add_err == 0) ||
5395 	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5396 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5397 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5398 			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5399 			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5400 		}
5401 	}
5402 	/* Wait for some more space to be available to turn on ATR */
5403 	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5404 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5405 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5406 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5407 			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5408 		}
5409 	}
5410 }
5411 
5412 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5413 /**
5414  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5415  * @pf: board private structure
5416  **/
5417 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5418 {
5419 	int flush_wait_retry = 50;
5420 	int reg;
5421 
5422 	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5423 		return;
5424 
5425 	if (time_after(jiffies, pf->fd_flush_timestamp +
5426 				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
5427 		set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5428 		pf->fd_flush_timestamp = jiffies;
5429 		pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
5430 		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5431 		/* flush all filters */
5432 		wr32(&pf->hw, I40E_PFQF_CTL_1,
5433 		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5434 		i40e_flush(&pf->hw);
5435 		pf->fd_flush_cnt++;
5436 		pf->fd_add_err = 0;
5437 		do {
5438 			/* Check FD flush status every 5-6msec */
5439 			usleep_range(5000, 6000);
5440 			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5441 			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5442 				break;
5443 		} while (flush_wait_retry--);
5444 		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5445 			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5446 		} else {
5447 			/* replay sideband filters */
5448 			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5449 
5450 			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5451 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5452 			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5453 			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5454 			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5455 		}
5456 	}
5457 }
5458 
5459 /**
5460  * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
5461  * @pf: board private structure
5462  **/
5463 int i40e_get_current_atr_cnt(struct i40e_pf *pf)
5464 {
5465 	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5466 }
5467 
5468 /* We can see up to 256 filter programming desc in transit if the filters are
5469  * being applied really fast; before we see the first
5470  * filter miss error on Rx queue 0. Accumulating enough error messages before
5471  * reacting will make sure we don't cause flush too often.
5472  */
5473 #define I40E_MAX_FD_PROGRAM_ERROR 256
5474 
5475 /**
5476  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5477  * @pf: board private structure
5478  **/
5479 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5480 {
5481 
5482 	/* if interface is down do nothing */
5483 	if (test_bit(__I40E_DOWN, &pf->state))
5484 		return;
5485 
5486 	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5487 		return;
5488 
5489 	if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
5490 	    (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
5491 	    (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
5492 		i40e_fdir_flush_and_replay(pf);
5493 
5494 	i40e_fdir_check_and_reenable(pf);
5495 
5496 }
5497 
5498 /**
5499  * i40e_vsi_link_event - notify VSI of a link event
5500  * @vsi: vsi to be notified
5501  * @link_up: link up or down
5502  **/
5503 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5504 {
5505 	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5506 		return;
5507 
5508 	switch (vsi->type) {
5509 	case I40E_VSI_MAIN:
5510 #ifdef I40E_FCOE
5511 	case I40E_VSI_FCOE:
5512 #endif
5513 		if (!vsi->netdev || !vsi->netdev_registered)
5514 			break;
5515 
5516 		if (link_up) {
5517 			netif_carrier_on(vsi->netdev);
5518 			netif_tx_wake_all_queues(vsi->netdev);
5519 		} else {
5520 			netif_carrier_off(vsi->netdev);
5521 			netif_tx_stop_all_queues(vsi->netdev);
5522 		}
5523 		break;
5524 
5525 	case I40E_VSI_SRIOV:
5526 	case I40E_VSI_VMDQ2:
5527 	case I40E_VSI_CTRL:
5528 	case I40E_VSI_MIRROR:
5529 	default:
5530 		/* there is no notification for other VSIs */
5531 		break;
5532 	}
5533 }
5534 
5535 /**
5536  * i40e_veb_link_event - notify elements on the veb of a link event
5537  * @veb: veb to be notified
5538  * @link_up: link up or down
5539  **/
5540 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5541 {
5542 	struct i40e_pf *pf;
5543 	int i;
5544 
5545 	if (!veb || !veb->pf)
5546 		return;
5547 	pf = veb->pf;
5548 
5549 	/* depth first... */
5550 	for (i = 0; i < I40E_MAX_VEB; i++)
5551 		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5552 			i40e_veb_link_event(pf->veb[i], link_up);
5553 
5554 	/* ... now the local VSIs */
5555 	for (i = 0; i < pf->num_alloc_vsi; i++)
5556 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5557 			i40e_vsi_link_event(pf->vsi[i], link_up);
5558 }
5559 
5560 /**
5561  * i40e_link_event - Update netif_carrier status
5562  * @pf: board private structure
5563  **/
5564 static void i40e_link_event(struct i40e_pf *pf)
5565 {
5566 	bool new_link, old_link;
5567 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5568 	u8 new_link_speed, old_link_speed;
5569 
5570 	/* set this to force the get_link_status call to refresh state */
5571 	pf->hw.phy.get_link_info = true;
5572 
5573 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5574 	new_link = i40e_get_link_status(&pf->hw);
5575 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
5576 	new_link_speed = pf->hw.phy.link_info.link_speed;
5577 
5578 	if (new_link == old_link &&
5579 	    new_link_speed == old_link_speed &&
5580 	    (test_bit(__I40E_DOWN, &vsi->state) ||
5581 	     new_link == netif_carrier_ok(vsi->netdev)))
5582 		return;
5583 
5584 	if (!test_bit(__I40E_DOWN, &vsi->state))
5585 		i40e_print_link_message(vsi, new_link);
5586 
5587 	/* Notify the base of the switch tree connected to
5588 	 * the link.  Floating VEBs are not notified.
5589 	 */
5590 	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
5591 		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
5592 	else
5593 		i40e_vsi_link_event(vsi, new_link);
5594 
5595 	if (pf->vf)
5596 		i40e_vc_notify_link_state(pf);
5597 
5598 	if (pf->flags & I40E_FLAG_PTP)
5599 		i40e_ptp_set_increment(pf);
5600 }
5601 
5602 /**
5603  * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
5604  * @pf: board private structure
5605  *
5606  * Set the per-queue flags to request a check for stuck queues in the irq
5607  * clean functions, then force interrupts to be sure the irq clean is called.
5608  **/
5609 static void i40e_check_hang_subtask(struct i40e_pf *pf)
5610 {
5611 	int i, v;
5612 
5613 	/* If we're down or resetting, just bail */
5614 	if (test_bit(__I40E_DOWN, &pf->state) ||
5615 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
5616 		return;
5617 
5618 	/* for each VSI/netdev
5619 	 *     for each Tx queue
5620 	 *         set the check flag
5621 	 *     for each q_vector
5622 	 *         force an interrupt
5623 	 */
5624 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5625 		struct i40e_vsi *vsi = pf->vsi[v];
5626 		int armed = 0;
5627 
5628 		if (!pf->vsi[v] ||
5629 		    test_bit(__I40E_DOWN, &vsi->state) ||
5630 		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
5631 			continue;
5632 
5633 		for (i = 0; i < vsi->num_queue_pairs; i++) {
5634 			set_check_for_tx_hang(vsi->tx_rings[i]);
5635 			if (test_bit(__I40E_HANG_CHECK_ARMED,
5636 				     &vsi->tx_rings[i]->state))
5637 				armed++;
5638 		}
5639 
5640 		if (armed) {
5641 			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
5642 				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
5643 				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
5644 				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
5645 				      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
5646 				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
5647 				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
5648 			} else {
5649 				u16 vec = vsi->base_vector - 1;
5650 				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
5651 				      I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
5652 				      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
5653 				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
5654 				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
5655 				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
5656 					wr32(&vsi->back->hw,
5657 					     I40E_PFINT_DYN_CTLN(vec), val);
5658 			}
5659 			i40e_flush(&vsi->back->hw);
5660 		}
5661 	}
5662 }
5663 
5664 /**
5665  * i40e_watchdog_subtask - periodic checks not using event driven response
5666  * @pf: board private structure
5667  **/
5668 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5669 {
5670 	int i;
5671 
5672 	/* if interface is down do nothing */
5673 	if (test_bit(__I40E_DOWN, &pf->state) ||
5674 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
5675 		return;
5676 
5677 	/* make sure we don't do these things too often */
5678 	if (time_before(jiffies, (pf->service_timer_previous +
5679 				  pf->service_timer_period)))
5680 		return;
5681 	pf->service_timer_previous = jiffies;
5682 
5683 	i40e_check_hang_subtask(pf);
5684 	i40e_link_event(pf);
5685 
5686 	/* Update the stats for active netdevs so the network stack
5687 	 * can look at updated numbers whenever it cares to
5688 	 */
5689 	for (i = 0; i < pf->num_alloc_vsi; i++)
5690 		if (pf->vsi[i] && pf->vsi[i]->netdev)
5691 			i40e_update_stats(pf->vsi[i]);
5692 
5693 	/* Update the stats for the active switching components */
5694 	for (i = 0; i < I40E_MAX_VEB; i++)
5695 		if (pf->veb[i])
5696 			i40e_update_veb_stats(pf->veb[i]);
5697 
5698 	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5699 }
5700 
5701 /**
5702  * i40e_reset_subtask - Set up for resetting the device and driver
5703  * @pf: board private structure
5704  **/
5705 static void i40e_reset_subtask(struct i40e_pf *pf)
5706 {
5707 	u32 reset_flags = 0;
5708 
5709 	rtnl_lock();
5710 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5711 		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
5712 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5713 	}
5714 	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5715 		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
5716 		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5717 	}
5718 	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5719 		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
5720 		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5721 	}
5722 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5723 		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
5724 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5725 	}
5726 	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5727 		reset_flags |= (1 << __I40E_DOWN_REQUESTED);
5728 		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5729 	}
5730 
5731 	/* If there's a recovery already waiting, it takes
5732 	 * precedence before starting a new reset sequence.
5733 	 */
5734 	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5735 		i40e_handle_reset_warning(pf);
5736 		goto unlock;
5737 	}
5738 
5739 	/* If we're already down or resetting, just bail */
5740 	if (reset_flags &&
5741 	    !test_bit(__I40E_DOWN, &pf->state) &&
5742 	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5743 		i40e_do_reset(pf, reset_flags);
5744 
5745 unlock:
5746 	rtnl_unlock();
5747 }
5748 
5749 /**
5750  * i40e_handle_link_event - Handle link event
5751  * @pf: board private structure
5752  * @e: event info posted on ARQ
5753  **/
5754 static void i40e_handle_link_event(struct i40e_pf *pf,
5755 				   struct i40e_arq_event_info *e)
5756 {
5757 	struct i40e_hw *hw = &pf->hw;
5758 	struct i40e_aqc_get_link_status *status =
5759 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5760 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
5761 
5762 	/* save off old link status information */
5763 	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
5764 	       sizeof(pf->hw.phy.link_info_old));
5765 
5766 	/* Do a new status request to re-enable LSE reporting
5767 	 * and load new status information into the hw struct
5768 	 * This completely ignores any state information
5769 	 * in the ARQ event info, instead choosing to always
5770 	 * issue the AQ update link status command.
5771 	 */
5772 	i40e_link_event(pf);
5773 
5774 	/* check for unqualified module, if link is down */
5775 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5776 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5777 	    (!(status->link_info & I40E_AQ_LINK_UP)))
5778 		dev_err(&pf->pdev->dev,
5779 			"The driver failed to link because an unqualified module was detected.\n");
5780 }
5781 
5782 /**
5783  * i40e_clean_adminq_subtask - Clean the AdminQ rings
5784  * @pf: board private structure
5785  **/
5786 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5787 {
5788 	struct i40e_arq_event_info event;
5789 	struct i40e_hw *hw = &pf->hw;
5790 	u16 pending, i = 0;
5791 	i40e_status ret;
5792 	u16 opcode;
5793 	u32 oldval;
5794 	u32 val;
5795 
5796 	/* Do not run clean AQ when PF reset fails */
5797 	if (test_bit(__I40E_RESET_FAILED, &pf->state))
5798 		return;
5799 
5800 	/* check for error indications */
5801 	val = rd32(&pf->hw, pf->hw.aq.arq.len);
5802 	oldval = val;
5803 	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
5804 		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
5805 		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
5806 	}
5807 	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
5808 		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
5809 		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
5810 	}
5811 	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
5812 		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
5813 		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
5814 	}
5815 	if (oldval != val)
5816 		wr32(&pf->hw, pf->hw.aq.arq.len, val);
5817 
5818 	val = rd32(&pf->hw, pf->hw.aq.asq.len);
5819 	oldval = val;
5820 	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
5821 		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
5822 		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
5823 	}
5824 	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
5825 		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
5826 		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
5827 	}
5828 	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
5829 		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
5830 		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
5831 	}
5832 	if (oldval != val)
5833 		wr32(&pf->hw, pf->hw.aq.asq.len, val);
5834 
5835 	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
5836 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
5837 	if (!event.msg_buf)
5838 		return;
5839 
5840 	do {
5841 		ret = i40e_clean_arq_element(hw, &event, &pending);
5842 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
5843 			break;
5844 		else if (ret) {
5845 			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
5846 			break;
5847 		}
5848 
5849 		opcode = le16_to_cpu(event.desc.opcode);
5850 		switch (opcode) {
5851 
5852 		case i40e_aqc_opc_get_link_status:
5853 			i40e_handle_link_event(pf, &event);
5854 			break;
5855 		case i40e_aqc_opc_send_msg_to_pf:
5856 			ret = i40e_vc_process_vf_msg(pf,
5857 					le16_to_cpu(event.desc.retval),
5858 					le32_to_cpu(event.desc.cookie_high),
5859 					le32_to_cpu(event.desc.cookie_low),
5860 					event.msg_buf,
5861 					event.msg_len);
5862 			break;
5863 		case i40e_aqc_opc_lldp_update_mib:
5864 			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
5865 #ifdef CONFIG_I40E_DCB
5866 			rtnl_lock();
5867 			ret = i40e_handle_lldp_event(pf, &event);
5868 			rtnl_unlock();
5869 #endif /* CONFIG_I40E_DCB */
5870 			break;
5871 		case i40e_aqc_opc_event_lan_overflow:
5872 			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
5873 			i40e_handle_lan_overflow_event(pf, &event);
5874 			break;
5875 		case i40e_aqc_opc_send_msg_to_peer:
5876 			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
5877 			break;
5878 		default:
5879 			dev_info(&pf->pdev->dev,
5880 				 "ARQ Error: Unknown event 0x%04x received\n",
5881 				 opcode);
5882 			break;
5883 		}
5884 	} while (pending && (i++ < pf->adminq_work_limit));
5885 
5886 	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
5887 	/* re-enable Admin queue interrupt cause */
5888 	val = rd32(hw, I40E_PFINT_ICR0_ENA);
5889 	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
5890 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
5891 	i40e_flush(hw);
5892 
5893 	kfree(event.msg_buf);
5894 }
5895 
5896 /**
5897  * i40e_verify_eeprom - make sure eeprom is good to use
5898  * @pf: board private structure
5899  **/
5900 static void i40e_verify_eeprom(struct i40e_pf *pf)
5901 {
5902 	int err;
5903 
5904 	err = i40e_diag_eeprom_test(&pf->hw);
5905 	if (err) {
5906 		/* retry in case of garbage read */
5907 		err = i40e_diag_eeprom_test(&pf->hw);
5908 		if (err) {
5909 			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5910 				 err);
5911 			set_bit(__I40E_BAD_EEPROM, &pf->state);
5912 		}
5913 	}
5914 
5915 	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5916 		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5917 		clear_bit(__I40E_BAD_EEPROM, &pf->state);
5918 	}
5919 }
5920 
5921 /**
5922  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
5923  * @veb: pointer to the VEB instance
5924  *
5925  * This is a recursive function that first builds the attached VSIs then
5926  * recurses in to build the next layer of VEB.  We track the connections
5927  * through our own index numbers because the seid's from the HW could
5928  * change across the reset.
5929  **/
5930 static int i40e_reconstitute_veb(struct i40e_veb *veb)
5931 {
5932 	struct i40e_vsi *ctl_vsi = NULL;
5933 	struct i40e_pf *pf = veb->pf;
5934 	int v, veb_idx;
5935 	int ret;
5936 
5937 	/* build VSI that owns this VEB, temporarily attached to base VEB */
5938 	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
5939 		if (pf->vsi[v] &&
5940 		    pf->vsi[v]->veb_idx == veb->idx &&
5941 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
5942 			ctl_vsi = pf->vsi[v];
5943 			break;
5944 		}
5945 	}
5946 	if (!ctl_vsi) {
5947 		dev_info(&pf->pdev->dev,
5948 			 "missing owner VSI for veb_idx %d\n", veb->idx);
5949 		ret = -ENOENT;
5950 		goto end_reconstitute;
5951 	}
5952 	if (ctl_vsi != pf->vsi[pf->lan_vsi])
5953 		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5954 	ret = i40e_add_vsi(ctl_vsi);
5955 	if (ret) {
5956 		dev_info(&pf->pdev->dev,
5957 			 "rebuild of owner VSI failed: %d\n", ret);
5958 		goto end_reconstitute;
5959 	}
5960 	i40e_vsi_reset_stats(ctl_vsi);
5961 
5962 	/* create the VEB in the switch and move the VSI onto the VEB */
5963 	ret = i40e_add_veb(veb, ctl_vsi);
5964 	if (ret)
5965 		goto end_reconstitute;
5966 
5967 	/* Enable LB mode for the main VSI now that it is on a VEB */
5968 	i40e_enable_pf_switch_lb(pf);
5969 
5970 	/* create the remaining VSIs attached to this VEB */
5971 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5972 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
5973 			continue;
5974 
5975 		if (pf->vsi[v]->veb_idx == veb->idx) {
5976 			struct i40e_vsi *vsi = pf->vsi[v];
5977 			vsi->uplink_seid = veb->seid;
5978 			ret = i40e_add_vsi(vsi);
5979 			if (ret) {
5980 				dev_info(&pf->pdev->dev,
5981 					 "rebuild of vsi_idx %d failed: %d\n",
5982 					 v, ret);
5983 				goto end_reconstitute;
5984 			}
5985 			i40e_vsi_reset_stats(vsi);
5986 		}
5987 	}
5988 
5989 	/* create any VEBs attached to this VEB - RECURSION */
5990 	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
5991 		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
5992 			pf->veb[veb_idx]->uplink_seid = veb->seid;
5993 			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
5994 			if (ret)
5995 				break;
5996 		}
5997 	}
5998 
5999 end_reconstitute:
6000 	return ret;
6001 }
6002 
6003 /**
6004  * i40e_get_capabilities - get info about the HW
6005  * @pf: the PF struct
6006  **/
6007 static int i40e_get_capabilities(struct i40e_pf *pf)
6008 {
6009 	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6010 	u16 data_size;
6011 	int buf_len;
6012 	int err;
6013 
6014 	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6015 	do {
6016 		cap_buf = kzalloc(buf_len, GFP_KERNEL);
6017 		if (!cap_buf)
6018 			return -ENOMEM;
6019 
6020 		/* this loads the data into the hw struct for us */
6021 		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6022 					    &data_size,
6023 					    i40e_aqc_opc_list_func_capabilities,
6024 					    NULL);
6025 		/* data loaded, buffer no longer needed */
6026 		kfree(cap_buf);
6027 
6028 		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6029 			/* retry with a larger buffer */
6030 			buf_len = data_size;
6031 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6032 			dev_info(&pf->pdev->dev,
6033 				 "capability discovery failed: aq=%d\n",
6034 				 pf->hw.aq.asq_last_status);
6035 			return -ENODEV;
6036 		}
6037 	} while (err);
6038 
6039 	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
6040 	    (pf->hw.aq.fw_maj_ver < 2)) {
6041 		pf->hw.func_caps.num_msix_vectors++;
6042 		pf->hw.func_caps.num_msix_vectors_vf++;
6043 	}
6044 
6045 	if (pf->hw.debug_mask & I40E_DEBUG_USER)
6046 		dev_info(&pf->pdev->dev,
6047 			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6048 			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6049 			 pf->hw.func_caps.num_msix_vectors,
6050 			 pf->hw.func_caps.num_msix_vectors_vf,
6051 			 pf->hw.func_caps.fd_filters_guaranteed,
6052 			 pf->hw.func_caps.fd_filters_best_effort,
6053 			 pf->hw.func_caps.num_tx_qp,
6054 			 pf->hw.func_caps.num_vsis);
6055 
6056 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6057 		       + pf->hw.func_caps.num_vfs)
6058 	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6059 		dev_info(&pf->pdev->dev,
6060 			 "got num_vsis %d, setting num_vsis to %d\n",
6061 			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6062 		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6063 	}
6064 
6065 	return 0;
6066 }
6067 
6068 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6069 
6070 /**
6071  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6072  * @pf: board private structure
6073  **/
6074 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6075 {
6076 	struct i40e_vsi *vsi;
6077 	int i;
6078 
6079 	/* quick workaround for an NVM issue that leaves a critical register
6080 	 * uninitialized
6081 	 */
6082 	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6083 		static const u32 hkey[] = {
6084 			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6085 			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6086 			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6087 			0x95b3a76d};
6088 
6089 		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6090 			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6091 	}
6092 
6093 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6094 		return;
6095 
6096 	/* find existing VSI and see if it needs configuring */
6097 	vsi = NULL;
6098 	for (i = 0; i < pf->num_alloc_vsi; i++) {
6099 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6100 			vsi = pf->vsi[i];
6101 			break;
6102 		}
6103 	}
6104 
6105 	/* create a new VSI if none exists */
6106 	if (!vsi) {
6107 		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6108 				     pf->vsi[pf->lan_vsi]->seid, 0);
6109 		if (!vsi) {
6110 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6111 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6112 			return;
6113 		}
6114 	}
6115 
6116 	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6117 }
6118 
6119 /**
6120  * i40e_fdir_teardown - release the Flow Director resources
6121  * @pf: board private structure
6122  **/
6123 static void i40e_fdir_teardown(struct i40e_pf *pf)
6124 {
6125 	int i;
6126 
6127 	i40e_fdir_filter_exit(pf);
6128 	for (i = 0; i < pf->num_alloc_vsi; i++) {
6129 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6130 			i40e_vsi_release(pf->vsi[i]);
6131 			break;
6132 		}
6133 	}
6134 }
6135 
6136 /**
6137  * i40e_prep_for_reset - prep for the core to reset
6138  * @pf: board private structure
6139  *
6140  * Close up the VFs and other things in prep for pf Reset.
6141   **/
6142 static void i40e_prep_for_reset(struct i40e_pf *pf)
6143 {
6144 	struct i40e_hw *hw = &pf->hw;
6145 	i40e_status ret = 0;
6146 	u32 v;
6147 
6148 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6149 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6150 		return;
6151 
6152 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6153 
6154 	/* quiesce the VSIs and their queues that are not already DOWN */
6155 	i40e_pf_quiesce_all_vsi(pf);
6156 
6157 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6158 		if (pf->vsi[v])
6159 			pf->vsi[v]->seid = 0;
6160 	}
6161 
6162 	i40e_shutdown_adminq(&pf->hw);
6163 
6164 	/* call shutdown HMC */
6165 	if (hw->hmc.hmc_obj) {
6166 		ret = i40e_shutdown_lan_hmc(hw);
6167 		if (ret)
6168 			dev_warn(&pf->pdev->dev,
6169 				 "shutdown_lan_hmc failed: %d\n", ret);
6170 	}
6171 }
6172 
6173 /**
6174  * i40e_send_version - update firmware with driver version
6175  * @pf: PF struct
6176  */
6177 static void i40e_send_version(struct i40e_pf *pf)
6178 {
6179 	struct i40e_driver_version dv;
6180 
6181 	dv.major_version = DRV_VERSION_MAJOR;
6182 	dv.minor_version = DRV_VERSION_MINOR;
6183 	dv.build_version = DRV_VERSION_BUILD;
6184 	dv.subbuild_version = 0;
6185 	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6186 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6187 }
6188 
6189 /**
6190  * i40e_reset_and_rebuild - reset and rebuild using a saved config
6191  * @pf: board private structure
6192  * @reinit: if the Main VSI needs to re-initialized.
6193  **/
6194 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6195 {
6196 	struct i40e_hw *hw = &pf->hw;
6197 	u8 set_fc_aq_fail = 0;
6198 	i40e_status ret;
6199 	u32 v;
6200 
6201 	/* Now we wait for GRST to settle out.
6202 	 * We don't have to delete the VEBs or VSIs from the hw switch
6203 	 * because the reset will make them disappear.
6204 	 */
6205 	ret = i40e_pf_reset(hw);
6206 	if (ret) {
6207 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6208 		set_bit(__I40E_RESET_FAILED, &pf->state);
6209 		goto clear_recovery;
6210 	}
6211 	pf->pfr_count++;
6212 
6213 	if (test_bit(__I40E_DOWN, &pf->state))
6214 		goto clear_recovery;
6215 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6216 
6217 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6218 	ret = i40e_init_adminq(&pf->hw);
6219 	if (ret) {
6220 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
6221 		goto clear_recovery;
6222 	}
6223 
6224 	/* re-verify the eeprom if we just had an EMP reset */
6225 	if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
6226 		clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
6227 		i40e_verify_eeprom(pf);
6228 	}
6229 
6230 	i40e_clear_pxe_mode(hw);
6231 	ret = i40e_get_capabilities(pf);
6232 	if (ret) {
6233 		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
6234 			 ret);
6235 		goto end_core_reset;
6236 	}
6237 
6238 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6239 				hw->func_caps.num_rx_qp,
6240 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6241 	if (ret) {
6242 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6243 		goto end_core_reset;
6244 	}
6245 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6246 	if (ret) {
6247 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6248 		goto end_core_reset;
6249 	}
6250 
6251 #ifdef CONFIG_I40E_DCB
6252 	ret = i40e_init_pf_dcb(pf);
6253 	if (ret) {
6254 		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6255 		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6256 		/* Continue without DCB enabled */
6257 	}
6258 #endif /* CONFIG_I40E_DCB */
6259 #ifdef I40E_FCOE
6260 	ret = i40e_init_pf_fcoe(pf);
6261 	if (ret)
6262 		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
6263 
6264 #endif
6265 	/* do basic switch setup */
6266 	ret = i40e_setup_pf_switch(pf, reinit);
6267 	if (ret)
6268 		goto end_core_reset;
6269 
6270 	/* driver is only interested in link up/down and module qualification
6271 	 * reports from firmware
6272 	 */
6273 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
6274 				       I40E_AQ_EVENT_LINK_UPDOWN |
6275 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6276 	if (ret)
6277 		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
6278 
6279 	/* make sure our flow control settings are restored */
6280 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6281 	if (ret)
6282 		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
6283 
6284 	/* Rebuild the VSIs and VEBs that existed before reset.
6285 	 * They are still in our local switch element arrays, so only
6286 	 * need to rebuild the switch model in the HW.
6287 	 *
6288 	 * If there were VEBs but the reconstitution failed, we'll try
6289 	 * try to recover minimal use by getting the basic PF VSI working.
6290 	 */
6291 	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6292 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6293 		/* find the one VEB connected to the MAC, and find orphans */
6294 		for (v = 0; v < I40E_MAX_VEB; v++) {
6295 			if (!pf->veb[v])
6296 				continue;
6297 
6298 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6299 			    pf->veb[v]->uplink_seid == 0) {
6300 				ret = i40e_reconstitute_veb(pf->veb[v]);
6301 
6302 				if (!ret)
6303 					continue;
6304 
6305 				/* If Main VEB failed, we're in deep doodoo,
6306 				 * so give up rebuilding the switch and set up
6307 				 * for minimal rebuild of PF VSI.
6308 				 * If orphan failed, we'll report the error
6309 				 * but try to keep going.
6310 				 */
6311 				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6312 					dev_info(&pf->pdev->dev,
6313 						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6314 						 ret);
6315 					pf->vsi[pf->lan_vsi]->uplink_seid
6316 								= pf->mac_seid;
6317 					break;
6318 				} else if (pf->veb[v]->uplink_seid == 0) {
6319 					dev_info(&pf->pdev->dev,
6320 						 "rebuild of orphan VEB failed: %d\n",
6321 						 ret);
6322 				}
6323 			}
6324 		}
6325 	}
6326 
6327 	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6328 		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6329 		/* no VEB, so rebuild only the Main VSI */
6330 		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6331 		if (ret) {
6332 			dev_info(&pf->pdev->dev,
6333 				 "rebuild of Main VSI failed: %d\n", ret);
6334 			goto end_core_reset;
6335 		}
6336 	}
6337 
6338 	msleep(75);
6339 	ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6340 	if (ret) {
6341 		dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
6342 			 pf->hw.aq.asq_last_status);
6343 	}
6344 
6345 	/* reinit the misc interrupt */
6346 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6347 		ret = i40e_setup_misc_vector(pf);
6348 
6349 	/* restart the VSIs that were rebuilt and running before the reset */
6350 	i40e_pf_unquiesce_all_vsi(pf);
6351 
6352 	if (pf->num_alloc_vfs) {
6353 		for (v = 0; v < pf->num_alloc_vfs; v++)
6354 			i40e_reset_vf(&pf->vf[v], true);
6355 	}
6356 
6357 	/* tell the firmware that we're starting */
6358 	i40e_send_version(pf);
6359 
6360 end_core_reset:
6361 	clear_bit(__I40E_RESET_FAILED, &pf->state);
6362 clear_recovery:
6363 	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6364 }
6365 
6366 /**
6367  * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
6368  * @pf: board private structure
6369  *
6370  * Close up the VFs and other things in prep for a Core Reset,
6371  * then get ready to rebuild the world.
6372  **/
6373 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6374 {
6375 	i40e_prep_for_reset(pf);
6376 	i40e_reset_and_rebuild(pf, false);
6377 }
6378 
6379 /**
6380  * i40e_handle_mdd_event
6381  * @pf: pointer to the pf structure
6382  *
6383  * Called from the MDD irq handler to identify possibly malicious vfs
6384  **/
6385 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6386 {
6387 	struct i40e_hw *hw = &pf->hw;
6388 	bool mdd_detected = false;
6389 	bool pf_mdd_detected = false;
6390 	struct i40e_vf *vf;
6391 	u32 reg;
6392 	int i;
6393 
6394 	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6395 		return;
6396 
6397 	/* find what triggered the MDD event */
6398 	reg = rd32(hw, I40E_GL_MDET_TX);
6399 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6400 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6401 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
6402 		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6403 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
6404 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6405 				I40E_GL_MDET_TX_EVENT_SHIFT;
6406 		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6407 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
6408 				pf->hw.func_caps.base_queue;
6409 		if (netif_msg_tx_err(pf))
6410 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
6411 				 event, queue, pf_num, vf_num);
6412 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6413 		mdd_detected = true;
6414 	}
6415 	reg = rd32(hw, I40E_GL_MDET_RX);
6416 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6417 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6418 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
6419 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6420 				I40E_GL_MDET_RX_EVENT_SHIFT;
6421 		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6422 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
6423 				pf->hw.func_caps.base_queue;
6424 		if (netif_msg_rx_err(pf))
6425 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6426 				 event, queue, func);
6427 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6428 		mdd_detected = true;
6429 	}
6430 
6431 	if (mdd_detected) {
6432 		reg = rd32(hw, I40E_PF_MDET_TX);
6433 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6434 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6435 			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
6436 			pf_mdd_detected = true;
6437 		}
6438 		reg = rd32(hw, I40E_PF_MDET_RX);
6439 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6440 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6441 			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
6442 			pf_mdd_detected = true;
6443 		}
6444 		/* Queue belongs to the PF, initiate a reset */
6445 		if (pf_mdd_detected) {
6446 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6447 			i40e_service_event_schedule(pf);
6448 		}
6449 	}
6450 
6451 	/* see if one of the VFs needs its hand slapped */
6452 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6453 		vf = &(pf->vf[i]);
6454 		reg = rd32(hw, I40E_VP_MDET_TX(i));
6455 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6456 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6457 			vf->num_mdd_events++;
6458 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6459 				 i);
6460 		}
6461 
6462 		reg = rd32(hw, I40E_VP_MDET_RX(i));
6463 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6464 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6465 			vf->num_mdd_events++;
6466 			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6467 				 i);
6468 		}
6469 
6470 		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6471 			dev_info(&pf->pdev->dev,
6472 				 "Too many MDD events on VF %d, disabled\n", i);
6473 			dev_info(&pf->pdev->dev,
6474 				 "Use PF Control I/F to re-enable the VF\n");
6475 			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6476 		}
6477 	}
6478 
6479 	/* re-enable mdd interrupt cause */
6480 	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6481 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6482 	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6483 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6484 	i40e_flush(hw);
6485 }
6486 
6487 #ifdef CONFIG_I40E_VXLAN
6488 /**
6489  * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6490  * @pf: board private structure
6491  **/
6492 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6493 {
6494 	struct i40e_hw *hw = &pf->hw;
6495 	i40e_status ret;
6496 	u8 filter_index;
6497 	__be16 port;
6498 	int i;
6499 
6500 	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6501 		return;
6502 
6503 	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6504 
6505 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6506 		if (pf->pending_vxlan_bitmap & (1 << i)) {
6507 			pf->pending_vxlan_bitmap &= ~(1 << i);
6508 			port = pf->vxlan_ports[i];
6509 			ret = port ?
6510 			      i40e_aq_add_udp_tunnel(hw, ntohs(port),
6511 						     I40E_AQC_TUNNEL_TYPE_VXLAN,
6512 						     &filter_index, NULL)
6513 			      : i40e_aq_del_udp_tunnel(hw, i, NULL);
6514 
6515 			if (ret) {
6516 				dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
6517 					 port ? "adding" : "deleting",
6518 					 ntohs(port), port ? i : i);
6519 
6520 				pf->vxlan_ports[i] = 0;
6521 			} else {
6522 				dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
6523 					 port ? "Added" : "Deleted",
6524 					 ntohs(port), port ? i : filter_index);
6525 			}
6526 		}
6527 	}
6528 }
6529 
6530 #endif
6531 /**
6532  * i40e_service_task - Run the driver's async subtasks
6533  * @work: pointer to work_struct containing our data
6534  **/
6535 static void i40e_service_task(struct work_struct *work)
6536 {
6537 	struct i40e_pf *pf = container_of(work,
6538 					  struct i40e_pf,
6539 					  service_task);
6540 	unsigned long start_time = jiffies;
6541 
6542 	/* don't bother with service tasks if a reset is in progress */
6543 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6544 		i40e_service_event_complete(pf);
6545 		return;
6546 	}
6547 
6548 	i40e_reset_subtask(pf);
6549 	i40e_handle_mdd_event(pf);
6550 	i40e_vc_process_vflr_event(pf);
6551 	i40e_watchdog_subtask(pf);
6552 	i40e_fdir_reinit_subtask(pf);
6553 	i40e_sync_filters_subtask(pf);
6554 #ifdef CONFIG_I40E_VXLAN
6555 	i40e_sync_vxlan_filters_subtask(pf);
6556 #endif
6557 	i40e_clean_adminq_subtask(pf);
6558 
6559 	i40e_service_event_complete(pf);
6560 
6561 	/* If the tasks have taken longer than one timer cycle or there
6562 	 * is more work to be done, reschedule the service task now
6563 	 * rather than wait for the timer to tick again.
6564 	 */
6565 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6566 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)		 ||
6567 	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)		 ||
6568 	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6569 		i40e_service_event_schedule(pf);
6570 }
6571 
6572 /**
6573  * i40e_service_timer - timer callback
6574  * @data: pointer to PF struct
6575  **/
6576 static void i40e_service_timer(unsigned long data)
6577 {
6578 	struct i40e_pf *pf = (struct i40e_pf *)data;
6579 
6580 	mod_timer(&pf->service_timer,
6581 		  round_jiffies(jiffies + pf->service_timer_period));
6582 	i40e_service_event_schedule(pf);
6583 }
6584 
6585 /**
6586  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6587  * @vsi: the VSI being configured
6588  **/
6589 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6590 {
6591 	struct i40e_pf *pf = vsi->back;
6592 
6593 	switch (vsi->type) {
6594 	case I40E_VSI_MAIN:
6595 		vsi->alloc_queue_pairs = pf->num_lan_qps;
6596 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6597 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6598 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6599 			vsi->num_q_vectors = pf->num_lan_msix;
6600 		else
6601 			vsi->num_q_vectors = 1;
6602 
6603 		break;
6604 
6605 	case I40E_VSI_FDIR:
6606 		vsi->alloc_queue_pairs = 1;
6607 		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6608 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6609 		vsi->num_q_vectors = 1;
6610 		break;
6611 
6612 	case I40E_VSI_VMDQ2:
6613 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6614 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6615 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6616 		vsi->num_q_vectors = pf->num_vmdq_msix;
6617 		break;
6618 
6619 	case I40E_VSI_SRIOV:
6620 		vsi->alloc_queue_pairs = pf->num_vf_qps;
6621 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6622 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6623 		break;
6624 
6625 #ifdef I40E_FCOE
6626 	case I40E_VSI_FCOE:
6627 		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6628 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6629 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6630 		vsi->num_q_vectors = pf->num_fcoe_msix;
6631 		break;
6632 
6633 #endif /* I40E_FCOE */
6634 	default:
6635 		WARN_ON(1);
6636 		return -ENODATA;
6637 	}
6638 
6639 	return 0;
6640 }
6641 
6642 /**
6643  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
6644  * @type: VSI pointer
6645  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6646  *
6647  * On error: returns error code (negative)
6648  * On success: returns 0
6649  **/
6650 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6651 {
6652 	int size;
6653 	int ret = 0;
6654 
6655 	/* allocate memory for both Tx and Rx ring pointers */
6656 	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6657 	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6658 	if (!vsi->tx_rings)
6659 		return -ENOMEM;
6660 	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
6661 
6662 	if (alloc_qvectors) {
6663 		/* allocate memory for q_vector pointers */
6664 		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6665 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6666 		if (!vsi->q_vectors) {
6667 			ret = -ENOMEM;
6668 			goto err_vectors;
6669 		}
6670 	}
6671 	return ret;
6672 
6673 err_vectors:
6674 	kfree(vsi->tx_rings);
6675 	return ret;
6676 }
6677 
6678 /**
6679  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
6680  * @pf: board private structure
6681  * @type: type of VSI
6682  *
6683  * On error: returns error code (negative)
6684  * On success: returns vsi index in PF (positive)
6685  **/
6686 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
6687 {
6688 	int ret = -ENODEV;
6689 	struct i40e_vsi *vsi;
6690 	int vsi_idx;
6691 	int i;
6692 
6693 	/* Need to protect the allocation of the VSIs at the PF level */
6694 	mutex_lock(&pf->switch_mutex);
6695 
6696 	/* VSI list may be fragmented if VSI creation/destruction has
6697 	 * been happening.  We can afford to do a quick scan to look
6698 	 * for any free VSIs in the list.
6699 	 *
6700 	 * find next empty vsi slot, looping back around if necessary
6701 	 */
6702 	i = pf->next_vsi;
6703 	while (i < pf->num_alloc_vsi && pf->vsi[i])
6704 		i++;
6705 	if (i >= pf->num_alloc_vsi) {
6706 		i = 0;
6707 		while (i < pf->next_vsi && pf->vsi[i])
6708 			i++;
6709 	}
6710 
6711 	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
6712 		vsi_idx = i;             /* Found one! */
6713 	} else {
6714 		ret = -ENODEV;
6715 		goto unlock_pf;  /* out of VSI slots! */
6716 	}
6717 	pf->next_vsi = ++i;
6718 
6719 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
6720 	if (!vsi) {
6721 		ret = -ENOMEM;
6722 		goto unlock_pf;
6723 	}
6724 	vsi->type = type;
6725 	vsi->back = pf;
6726 	set_bit(__I40E_DOWN, &vsi->state);
6727 	vsi->flags = 0;
6728 	vsi->idx = vsi_idx;
6729 	vsi->rx_itr_setting = pf->rx_itr_default;
6730 	vsi->tx_itr_setting = pf->tx_itr_default;
6731 	vsi->netdev_registered = false;
6732 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
6733 	INIT_LIST_HEAD(&vsi->mac_filter_list);
6734 	vsi->irqs_ready = false;
6735 
6736 	ret = i40e_set_num_rings_in_vsi(vsi);
6737 	if (ret)
6738 		goto err_rings;
6739 
6740 	ret = i40e_vsi_alloc_arrays(vsi, true);
6741 	if (ret)
6742 		goto err_rings;
6743 
6744 	/* Setup default MSIX irq handler for VSI */
6745 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
6746 
6747 	pf->vsi[vsi_idx] = vsi;
6748 	ret = vsi_idx;
6749 	goto unlock_pf;
6750 
6751 err_rings:
6752 	pf->next_vsi = i - 1;
6753 	kfree(vsi);
6754 unlock_pf:
6755 	mutex_unlock(&pf->switch_mutex);
6756 	return ret;
6757 }
6758 
6759 /**
6760  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6761  * @type: VSI pointer
6762  * @free_qvectors: a bool to specify if q_vectors need to be freed.
6763  *
6764  * On error: returns error code (negative)
6765  * On success: returns 0
6766  **/
6767 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
6768 {
6769 	/* free the ring and vector containers */
6770 	if (free_qvectors) {
6771 		kfree(vsi->q_vectors);
6772 		vsi->q_vectors = NULL;
6773 	}
6774 	kfree(vsi->tx_rings);
6775 	vsi->tx_rings = NULL;
6776 	vsi->rx_rings = NULL;
6777 }
6778 
6779 /**
6780  * i40e_vsi_clear - Deallocate the VSI provided
6781  * @vsi: the VSI being un-configured
6782  **/
6783 static int i40e_vsi_clear(struct i40e_vsi *vsi)
6784 {
6785 	struct i40e_pf *pf;
6786 
6787 	if (!vsi)
6788 		return 0;
6789 
6790 	if (!vsi->back)
6791 		goto free_vsi;
6792 	pf = vsi->back;
6793 
6794 	mutex_lock(&pf->switch_mutex);
6795 	if (!pf->vsi[vsi->idx]) {
6796 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
6797 			vsi->idx, vsi->idx, vsi, vsi->type);
6798 		goto unlock_vsi;
6799 	}
6800 
6801 	if (pf->vsi[vsi->idx] != vsi) {
6802 		dev_err(&pf->pdev->dev,
6803 			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
6804 			pf->vsi[vsi->idx]->idx,
6805 			pf->vsi[vsi->idx],
6806 			pf->vsi[vsi->idx]->type,
6807 			vsi->idx, vsi, vsi->type);
6808 		goto unlock_vsi;
6809 	}
6810 
6811 	/* updates the pf for this cleared vsi */
6812 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
6813 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
6814 
6815 	i40e_vsi_free_arrays(vsi, true);
6816 
6817 	pf->vsi[vsi->idx] = NULL;
6818 	if (vsi->idx < pf->next_vsi)
6819 		pf->next_vsi = vsi->idx;
6820 
6821 unlock_vsi:
6822 	mutex_unlock(&pf->switch_mutex);
6823 free_vsi:
6824 	kfree(vsi);
6825 
6826 	return 0;
6827 }
6828 
6829 /**
6830  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
6831  * @vsi: the VSI being cleaned
6832  **/
6833 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
6834 {
6835 	int i;
6836 
6837 	if (vsi->tx_rings && vsi->tx_rings[0]) {
6838 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6839 			kfree_rcu(vsi->tx_rings[i], rcu);
6840 			vsi->tx_rings[i] = NULL;
6841 			vsi->rx_rings[i] = NULL;
6842 		}
6843 	}
6844 }
6845 
6846 /**
6847  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
6848  * @vsi: the VSI being configured
6849  **/
6850 static int i40e_alloc_rings(struct i40e_vsi *vsi)
6851 {
6852 	struct i40e_ring *tx_ring, *rx_ring;
6853 	struct i40e_pf *pf = vsi->back;
6854 	int i;
6855 
6856 	/* Set basic values in the rings to be used later during open() */
6857 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6858 		/* allocate space for both Tx and Rx in one shot */
6859 		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
6860 		if (!tx_ring)
6861 			goto err_out;
6862 
6863 		tx_ring->queue_index = i;
6864 		tx_ring->reg_idx = vsi->base_queue + i;
6865 		tx_ring->ring_active = false;
6866 		tx_ring->vsi = vsi;
6867 		tx_ring->netdev = vsi->netdev;
6868 		tx_ring->dev = &pf->pdev->dev;
6869 		tx_ring->count = vsi->num_desc;
6870 		tx_ring->size = 0;
6871 		tx_ring->dcb_tc = 0;
6872 		vsi->tx_rings[i] = tx_ring;
6873 
6874 		rx_ring = &tx_ring[1];
6875 		rx_ring->queue_index = i;
6876 		rx_ring->reg_idx = vsi->base_queue + i;
6877 		rx_ring->ring_active = false;
6878 		rx_ring->vsi = vsi;
6879 		rx_ring->netdev = vsi->netdev;
6880 		rx_ring->dev = &pf->pdev->dev;
6881 		rx_ring->count = vsi->num_desc;
6882 		rx_ring->size = 0;
6883 		rx_ring->dcb_tc = 0;
6884 		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
6885 			set_ring_16byte_desc_enabled(rx_ring);
6886 		else
6887 			clear_ring_16byte_desc_enabled(rx_ring);
6888 		vsi->rx_rings[i] = rx_ring;
6889 	}
6890 
6891 	return 0;
6892 
6893 err_out:
6894 	i40e_vsi_clear_rings(vsi);
6895 	return -ENOMEM;
6896 }
6897 
6898 /**
6899  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
6900  * @pf: board private structure
6901  * @vectors: the number of MSI-X vectors to request
6902  *
6903  * Returns the number of vectors reserved, or error
6904  **/
6905 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
6906 {
6907 	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
6908 					I40E_MIN_MSIX, vectors);
6909 	if (vectors < 0) {
6910 		dev_info(&pf->pdev->dev,
6911 			 "MSI-X vector reservation failed: %d\n", vectors);
6912 		vectors = 0;
6913 	}
6914 
6915 	return vectors;
6916 }
6917 
6918 /**
6919  * i40e_init_msix - Setup the MSIX capability
6920  * @pf: board private structure
6921  *
6922  * Work with the OS to set up the MSIX vectors needed.
6923  *
6924  * Returns 0 on success, negative on failure
6925  **/
6926 static int i40e_init_msix(struct i40e_pf *pf)
6927 {
6928 	i40e_status err = 0;
6929 	struct i40e_hw *hw = &pf->hw;
6930 	int other_vecs = 0;
6931 	int v_budget, i;
6932 	int vec;
6933 
6934 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6935 		return -ENODEV;
6936 
6937 	/* The number of vectors we'll request will be comprised of:
6938 	 *   - Add 1 for "other" cause for Admin Queue events, etc.
6939 	 *   - The number of LAN queue pairs
6940 	 *	- Queues being used for RSS.
6941 	 *		We don't need as many as max_rss_size vectors.
6942 	 *		use rss_size instead in the calculation since that
6943 	 *		is governed by number of cpus in the system.
6944 	 *	- assumes symmetric Tx/Rx pairing
6945 	 *   - The number of VMDq pairs
6946 #ifdef I40E_FCOE
6947 	 *   - The number of FCOE qps.
6948 #endif
6949 	 * Once we count this up, try the request.
6950 	 *
6951 	 * If we can't get what we want, we'll simplify to nearly nothing
6952 	 * and try again.  If that still fails, we punt.
6953 	 */
6954 	pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
6955 	pf->num_vmdq_msix = pf->num_vmdq_qps;
6956 	other_vecs = 1;
6957 	other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
6958 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
6959 		other_vecs++;
6960 
6961 	/* Scale down if necessary, and the rings will share vectors */
6962 	pf->num_lan_msix = min_t(int, pf->num_lan_msix,
6963 			(hw->func_caps.num_msix_vectors - other_vecs));
6964 	v_budget = pf->num_lan_msix + other_vecs;
6965 
6966 #ifdef I40E_FCOE
6967 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6968 		pf->num_fcoe_msix = pf->num_fcoe_qps;
6969 		v_budget += pf->num_fcoe_msix;
6970 	}
6971 #endif
6972 
6973 	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
6974 				   GFP_KERNEL);
6975 	if (!pf->msix_entries)
6976 		return -ENOMEM;
6977 
6978 	for (i = 0; i < v_budget; i++)
6979 		pf->msix_entries[i].entry = i;
6980 	vec = i40e_reserve_msix_vectors(pf, v_budget);
6981 
6982 	if (vec != v_budget) {
6983 		/* If we have limited resources, we will start with no vectors
6984 		 * for the special features and then allocate vectors to some
6985 		 * of these features based on the policy and at the end disable
6986 		 * the features that did not get any vectors.
6987 		 */
6988 #ifdef I40E_FCOE
6989 		pf->num_fcoe_qps = 0;
6990 		pf->num_fcoe_msix = 0;
6991 #endif
6992 		pf->num_vmdq_msix = 0;
6993 	}
6994 
6995 	if (vec < I40E_MIN_MSIX) {
6996 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
6997 		kfree(pf->msix_entries);
6998 		pf->msix_entries = NULL;
6999 		return -ENODEV;
7000 
7001 	} else if (vec == I40E_MIN_MSIX) {
7002 		/* Adjust for minimal MSIX use */
7003 		pf->num_vmdq_vsis = 0;
7004 		pf->num_vmdq_qps = 0;
7005 		pf->num_lan_qps = 1;
7006 		pf->num_lan_msix = 1;
7007 
7008 	} else if (vec != v_budget) {
7009 		/* reserve the misc vector */
7010 		vec--;
7011 
7012 		/* Scale vector usage down */
7013 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
7014 		pf->num_vmdq_vsis = 1;
7015 
7016 		/* partition out the remaining vectors */
7017 		switch (vec) {
7018 		case 2:
7019 			pf->num_lan_msix = 1;
7020 			break;
7021 		case 3:
7022 #ifdef I40E_FCOE
7023 			/* give one vector to FCoE */
7024 			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7025 				pf->num_lan_msix = 1;
7026 				pf->num_fcoe_msix = 1;
7027 			}
7028 #else
7029 			pf->num_lan_msix = 2;
7030 #endif
7031 			break;
7032 		default:
7033 #ifdef I40E_FCOE
7034 			/* give one vector to FCoE */
7035 			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7036 				pf->num_fcoe_msix = 1;
7037 				vec--;
7038 			}
7039 #endif
7040 			pf->num_lan_msix = min_t(int, (vec / 2),
7041 						 pf->num_lan_qps);
7042 			pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
7043 						  I40E_DEFAULT_NUM_VMDQ_VSI);
7044 			break;
7045 		}
7046 	}
7047 
7048 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7049 	    (pf->num_vmdq_msix == 0)) {
7050 		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7051 		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7052 	}
7053 #ifdef I40E_FCOE
7054 
7055 	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7056 		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7057 		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7058 	}
7059 #endif
7060 	return err;
7061 }
7062 
7063 /**
7064  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7065  * @vsi: the VSI being configured
7066  * @v_idx: index of the vector in the vsi struct
7067  *
7068  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7069  **/
7070 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7071 {
7072 	struct i40e_q_vector *q_vector;
7073 
7074 	/* allocate q_vector */
7075 	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7076 	if (!q_vector)
7077 		return -ENOMEM;
7078 
7079 	q_vector->vsi = vsi;
7080 	q_vector->v_idx = v_idx;
7081 	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7082 	if (vsi->netdev)
7083 		netif_napi_add(vsi->netdev, &q_vector->napi,
7084 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
7085 
7086 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
7087 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
7088 
7089 	/* tie q_vector and vsi together */
7090 	vsi->q_vectors[v_idx] = q_vector;
7091 
7092 	return 0;
7093 }
7094 
7095 /**
7096  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7097  * @vsi: the VSI being configured
7098  *
7099  * We allocate one q_vector per queue interrupt.  If allocation fails we
7100  * return -ENOMEM.
7101  **/
7102 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7103 {
7104 	struct i40e_pf *pf = vsi->back;
7105 	int v_idx, num_q_vectors;
7106 	int err;
7107 
7108 	/* if not MSIX, give the one vector only to the LAN VSI */
7109 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7110 		num_q_vectors = vsi->num_q_vectors;
7111 	else if (vsi == pf->vsi[pf->lan_vsi])
7112 		num_q_vectors = 1;
7113 	else
7114 		return -EINVAL;
7115 
7116 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7117 		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7118 		if (err)
7119 			goto err_out;
7120 	}
7121 
7122 	return 0;
7123 
7124 err_out:
7125 	while (v_idx--)
7126 		i40e_free_q_vector(vsi, v_idx);
7127 
7128 	return err;
7129 }
7130 
7131 /**
7132  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7133  * @pf: board private structure to initialize
7134  **/
7135 static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
7136 {
7137 	int err = 0;
7138 
7139 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7140 		err = i40e_init_msix(pf);
7141 		if (err) {
7142 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
7143 #ifdef I40E_FCOE
7144 				       I40E_FLAG_FCOE_ENABLED	|
7145 #endif
7146 				       I40E_FLAG_RSS_ENABLED	|
7147 				       I40E_FLAG_DCB_CAPABLE	|
7148 				       I40E_FLAG_SRIOV_ENABLED	|
7149 				       I40E_FLAG_FD_SB_ENABLED	|
7150 				       I40E_FLAG_FD_ATR_ENABLED	|
7151 				       I40E_FLAG_VMDQ_ENABLED);
7152 
7153 			/* rework the queue expectations without MSIX */
7154 			i40e_determine_queue_usage(pf);
7155 		}
7156 	}
7157 
7158 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7159 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7160 		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7161 		err = pci_enable_msi(pf->pdev);
7162 		if (err) {
7163 			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
7164 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7165 		}
7166 	}
7167 
7168 	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7169 		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7170 
7171 	/* track first vector for misc interrupts */
7172 	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
7173 }
7174 
7175 /**
7176  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7177  * @pf: board private structure
7178  *
7179  * This sets up the handler for MSIX 0, which is used to manage the
7180  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
7181  * when in MSI or Legacy interrupt mode.
7182  **/
7183 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7184 {
7185 	struct i40e_hw *hw = &pf->hw;
7186 	int err = 0;
7187 
7188 	/* Only request the irq if this is the first time through, and
7189 	 * not when we're rebuilding after a Reset
7190 	 */
7191 	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7192 		err = request_irq(pf->msix_entries[0].vector,
7193 				  i40e_intr, 0, pf->int_name, pf);
7194 		if (err) {
7195 			dev_info(&pf->pdev->dev,
7196 				 "request_irq for %s failed: %d\n",
7197 				 pf->int_name, err);
7198 			return -EFAULT;
7199 		}
7200 	}
7201 
7202 	i40e_enable_misc_int_causes(pf);
7203 
7204 	/* associate no queues to the misc vector */
7205 	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7206 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7207 
7208 	i40e_flush(hw);
7209 
7210 	i40e_irq_dynamic_enable_icr0(pf);
7211 
7212 	return err;
7213 }
7214 
7215 /**
7216  * i40e_config_rss - Prepare for RSS if used
7217  * @pf: board private structure
7218  **/
7219 static int i40e_config_rss(struct i40e_pf *pf)
7220 {
7221 	u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
7222 	struct i40e_hw *hw = &pf->hw;
7223 	u32 lut = 0;
7224 	int i, j;
7225 	u64 hena;
7226 	u32 reg_val;
7227 
7228 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
7229 	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7230 		wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
7231 
7232 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
7233 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
7234 		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
7235 	hena |= I40E_DEFAULT_RSS_HENA;
7236 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
7237 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
7238 
7239 	/* Check capability and Set table size and register per hw expectation*/
7240 	reg_val = rd32(hw, I40E_PFQF_CTL_0);
7241 	if (hw->func_caps.rss_table_size == 512) {
7242 		reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
7243 		pf->rss_table_size = 512;
7244 	} else {
7245 		pf->rss_table_size = 128;
7246 		reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
7247 	}
7248 	wr32(hw, I40E_PFQF_CTL_0, reg_val);
7249 
7250 	/* Populate the LUT with max no. of queues in round robin fashion */
7251 	for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
7252 
7253 		/* The assumption is that lan qp count will be the highest
7254 		 * qp count for any PF VSI that needs RSS.
7255 		 * If multiple VSIs need RSS support, all the qp counts
7256 		 * for those VSIs should be a power of 2 for RSS to work.
7257 		 * If LAN VSI is the only consumer for RSS then this requirement
7258 		 * is not necessary.
7259 		 */
7260 		if (j == pf->rss_size)
7261 			j = 0;
7262 		/* lut = 4-byte sliding window of 4 lut entries */
7263 		lut = (lut << 8) | (j &
7264 			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
7265 		/* On i = 3, we have 4 entries in lut; write to the register */
7266 		if ((i & 3) == 3)
7267 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
7268 	}
7269 	i40e_flush(hw);
7270 
7271 	return 0;
7272 }
7273 
7274 /**
7275  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7276  * @pf: board private structure
7277  * @queue_count: the requested queue count for rss.
7278  *
7279  * returns 0 if rss is not enabled, if enabled returns the final rss queue
7280  * count which may be different from the requested queue count.
7281  **/
7282 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7283 {
7284 	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7285 		return 0;
7286 
7287 	queue_count = min_t(int, queue_count, pf->rss_size_max);
7288 
7289 	if (queue_count != pf->rss_size) {
7290 		i40e_prep_for_reset(pf);
7291 
7292 		pf->rss_size = queue_count;
7293 
7294 		i40e_reset_and_rebuild(pf, true);
7295 		i40e_config_rss(pf);
7296 	}
7297 	dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
7298 	return pf->rss_size;
7299 }
7300 
7301 /**
7302  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
7303  * @pf: board private structure to initialize
7304  *
7305  * i40e_sw_init initializes the Adapter private data structure.
7306  * Fields are initialized based on PCI device information and
7307  * OS network device settings (MTU size).
7308  **/
7309 static int i40e_sw_init(struct i40e_pf *pf)
7310 {
7311 	int err = 0;
7312 	int size;
7313 
7314 	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
7315 				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
7316 	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
7317 	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
7318 		if (I40E_DEBUG_USER & debug)
7319 			pf->hw.debug_mask = debug;
7320 		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
7321 						I40E_DEFAULT_MSG_ENABLE);
7322 	}
7323 
7324 	/* Set default capability flags */
7325 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
7326 		    I40E_FLAG_MSI_ENABLED     |
7327 		    I40E_FLAG_MSIX_ENABLED    |
7328 		    I40E_FLAG_RX_1BUF_ENABLED;
7329 
7330 	/* Set default ITR */
7331 	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
7332 	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
7333 
7334 	/* Depending on PF configurations, it is possible that the RSS
7335 	 * maximum might end up larger than the available queues
7336 	 */
7337 	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
7338 	pf->rss_size = 1;
7339 	pf->rss_size_max = min_t(int, pf->rss_size_max,
7340 				 pf->hw.func_caps.num_tx_qp);
7341 	if (pf->hw.func_caps.rss) {
7342 		pf->flags |= I40E_FLAG_RSS_ENABLED;
7343 		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
7344 	}
7345 
7346 	/* MFP mode enabled */
7347 	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
7348 		pf->flags |= I40E_FLAG_MFP_ENABLED;
7349 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
7350 	}
7351 
7352 	/* FW/NVM is not yet fixed in this regard */
7353 	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
7354 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
7355 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7356 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
7357 		/* Setup a counter for fd_atr per pf */
7358 		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
7359 		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
7360 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7361 			/* Setup a counter for fd_sb per pf */
7362 			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
7363 		} else {
7364 			dev_info(&pf->pdev->dev,
7365 				 "Flow Director Sideband mode Disabled in MFP mode\n");
7366 		}
7367 		pf->fdir_pf_filter_count =
7368 				 pf->hw.func_caps.fd_filters_guaranteed;
7369 		pf->hw.fdir_shared_filter_count =
7370 				 pf->hw.func_caps.fd_filters_best_effort;
7371 	}
7372 
7373 	if (pf->hw.func_caps.vmdq) {
7374 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
7375 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
7376 		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
7377 	}
7378 
7379 #ifdef I40E_FCOE
7380 	err = i40e_init_pf_fcoe(pf);
7381 	if (err)
7382 		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
7383 
7384 #endif /* I40E_FCOE */
7385 #ifdef CONFIG_PCI_IOV
7386 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
7387 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7388 		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7389 		pf->num_req_vfs = min_t(int,
7390 					pf->hw.func_caps.num_vfs,
7391 					I40E_MAX_VF_COUNT);
7392 	}
7393 #endif /* CONFIG_PCI_IOV */
7394 	pf->eeprom_version = 0xDEAD;
7395 	pf->lan_veb = I40E_NO_VEB;
7396 	pf->lan_vsi = I40E_NO_VSI;
7397 
7398 	/* set up queue assignment tracking */
7399 	size = sizeof(struct i40e_lump_tracking)
7400 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
7401 	pf->qp_pile = kzalloc(size, GFP_KERNEL);
7402 	if (!pf->qp_pile) {
7403 		err = -ENOMEM;
7404 		goto sw_init_done;
7405 	}
7406 	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
7407 	pf->qp_pile->search_hint = 0;
7408 
7409 	/* set up vector assignment tracking */
7410 	size = sizeof(struct i40e_lump_tracking)
7411 		+ (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
7412 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
7413 	if (!pf->irq_pile) {
7414 		kfree(pf->qp_pile);
7415 		err = -ENOMEM;
7416 		goto sw_init_done;
7417 	}
7418 	pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
7419 	pf->irq_pile->search_hint = 0;
7420 
7421 	pf->tx_timeout_recovery_level = 1;
7422 
7423 	mutex_init(&pf->switch_mutex);
7424 
7425 sw_init_done:
7426 	return err;
7427 }
7428 
7429 /**
7430  * i40e_set_ntuple - set the ntuple feature flag and take action
7431  * @pf: board private structure to initialize
7432  * @features: the feature set that the stack is suggesting
7433  *
7434  * returns a bool to indicate if reset needs to happen
7435  **/
7436 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7437 {
7438 	bool need_reset = false;
7439 
7440 	/* Check if Flow Director n-tuple support was enabled or disabled.  If
7441 	 * the state changed, we need to reset.
7442 	 */
7443 	if (features & NETIF_F_NTUPLE) {
7444 		/* Enable filters and mark for reset */
7445 		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7446 			need_reset = true;
7447 		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7448 	} else {
7449 		/* turn off filters, mark for reset and clear SW filter list */
7450 		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7451 			need_reset = true;
7452 			i40e_fdir_filter_exit(pf);
7453 		}
7454 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7455 		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
7456 		/* reset fd counters */
7457 		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7458 		pf->fdir_pf_active_filters = 0;
7459 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7460 		dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
7461 		/* if ATR was auto disabled it can be re-enabled. */
7462 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
7463 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
7464 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
7465 	}
7466 	return need_reset;
7467 }
7468 
7469 /**
7470  * i40e_set_features - set the netdev feature flags
7471  * @netdev: ptr to the netdev being adjusted
7472  * @features: the feature set that the stack is suggesting
7473  **/
7474 static int i40e_set_features(struct net_device *netdev,
7475 			     netdev_features_t features)
7476 {
7477 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7478 	struct i40e_vsi *vsi = np->vsi;
7479 	struct i40e_pf *pf = vsi->back;
7480 	bool need_reset;
7481 
7482 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
7483 		i40e_vlan_stripping_enable(vsi);
7484 	else
7485 		i40e_vlan_stripping_disable(vsi);
7486 
7487 	need_reset = i40e_set_ntuple(pf, features);
7488 
7489 	if (need_reset)
7490 		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
7491 
7492 	return 0;
7493 }
7494 
7495 #ifdef CONFIG_I40E_VXLAN
7496 /**
7497  * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
7498  * @pf: board private structure
7499  * @port: The UDP port to look up
7500  *
7501  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
7502  **/
7503 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
7504 {
7505 	u8 i;
7506 
7507 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7508 		if (pf->vxlan_ports[i] == port)
7509 			return i;
7510 	}
7511 
7512 	return i;
7513 }
7514 
7515 /**
7516  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
7517  * @netdev: This physical port's netdev
7518  * @sa_family: Socket Family that VXLAN is notifying us about
7519  * @port: New UDP port number that VXLAN started listening to
7520  **/
7521 static void i40e_add_vxlan_port(struct net_device *netdev,
7522 				sa_family_t sa_family, __be16 port)
7523 {
7524 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7525 	struct i40e_vsi *vsi = np->vsi;
7526 	struct i40e_pf *pf = vsi->back;
7527 	u8 next_idx;
7528 	u8 idx;
7529 
7530 	if (sa_family == AF_INET6)
7531 		return;
7532 
7533 	idx = i40e_get_vxlan_port_idx(pf, port);
7534 
7535 	/* Check if port already exists */
7536 	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7537 		netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
7538 		return;
7539 	}
7540 
7541 	/* Now check if there is space to add the new port */
7542 	next_idx = i40e_get_vxlan_port_idx(pf, 0);
7543 
7544 	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7545 		netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
7546 			    ntohs(port));
7547 		return;
7548 	}
7549 
7550 	/* New port: add it and mark its index in the bitmap */
7551 	pf->vxlan_ports[next_idx] = port;
7552 	pf->pending_vxlan_bitmap |= (1 << next_idx);
7553 
7554 	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7555 }
7556 
7557 /**
7558  * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
7559  * @netdev: This physical port's netdev
7560  * @sa_family: Socket Family that VXLAN is notifying us about
7561  * @port: UDP port number that VXLAN stopped listening to
7562  **/
7563 static void i40e_del_vxlan_port(struct net_device *netdev,
7564 				sa_family_t sa_family, __be16 port)
7565 {
7566 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7567 	struct i40e_vsi *vsi = np->vsi;
7568 	struct i40e_pf *pf = vsi->back;
7569 	u8 idx;
7570 
7571 	if (sa_family == AF_INET6)
7572 		return;
7573 
7574 	idx = i40e_get_vxlan_port_idx(pf, port);
7575 
7576 	/* Check if port already exists */
7577 	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7578 		/* if port exists, set it to 0 (mark for deletion)
7579 		 * and make it pending
7580 		 */
7581 		pf->vxlan_ports[idx] = 0;
7582 
7583 		pf->pending_vxlan_bitmap |= (1 << idx);
7584 
7585 		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7586 	} else {
7587 		netdev_warn(netdev, "Port %d was not found, not deleting\n",
7588 			    ntohs(port));
7589 	}
7590 }
7591 
7592 #endif
7593 static int i40e_get_phys_port_id(struct net_device *netdev,
7594 				 struct netdev_phys_item_id *ppid)
7595 {
7596 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7597 	struct i40e_pf *pf = np->vsi->back;
7598 	struct i40e_hw *hw = &pf->hw;
7599 
7600 	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
7601 		return -EOPNOTSUPP;
7602 
7603 	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
7604 	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
7605 
7606 	return 0;
7607 }
7608 
7609 /**
7610  * i40e_ndo_fdb_add - add an entry to the hardware database
7611  * @ndm: the input from the stack
7612  * @tb: pointer to array of nladdr (unused)
7613  * @dev: the net device pointer
7614  * @addr: the MAC address entry being added
7615  * @flags: instructions from stack about fdb operation
7616  */
7617 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7618 			    struct net_device *dev,
7619 			    const unsigned char *addr, u16 vid,
7620 			    u16 flags)
7621 {
7622 	struct i40e_netdev_priv *np = netdev_priv(dev);
7623 	struct i40e_pf *pf = np->vsi->back;
7624 	int err = 0;
7625 
7626 	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
7627 		return -EOPNOTSUPP;
7628 
7629 	if (vid) {
7630 		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
7631 		return -EINVAL;
7632 	}
7633 
7634 	/* Hardware does not support aging addresses so if a
7635 	 * ndm_state is given only allow permanent addresses
7636 	 */
7637 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
7638 		netdev_info(dev, "FDB only supports static addresses\n");
7639 		return -EINVAL;
7640 	}
7641 
7642 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
7643 		err = dev_uc_add_excl(dev, addr);
7644 	else if (is_multicast_ether_addr(addr))
7645 		err = dev_mc_add_excl(dev, addr);
7646 	else
7647 		err = -EINVAL;
7648 
7649 	/* Only return duplicate errors if NLM_F_EXCL is set */
7650 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
7651 		err = 0;
7652 
7653 	return err;
7654 }
7655 
7656 static const struct net_device_ops i40e_netdev_ops = {
7657 	.ndo_open		= i40e_open,
7658 	.ndo_stop		= i40e_close,
7659 	.ndo_start_xmit		= i40e_lan_xmit_frame,
7660 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
7661 	.ndo_set_rx_mode	= i40e_set_rx_mode,
7662 	.ndo_validate_addr	= eth_validate_addr,
7663 	.ndo_set_mac_address	= i40e_set_mac,
7664 	.ndo_change_mtu		= i40e_change_mtu,
7665 	.ndo_do_ioctl		= i40e_ioctl,
7666 	.ndo_tx_timeout		= i40e_tx_timeout,
7667 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
7668 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
7669 #ifdef CONFIG_NET_POLL_CONTROLLER
7670 	.ndo_poll_controller	= i40e_netpoll,
7671 #endif
7672 	.ndo_setup_tc		= i40e_setup_tc,
7673 #ifdef I40E_FCOE
7674 	.ndo_fcoe_enable	= i40e_fcoe_enable,
7675 	.ndo_fcoe_disable	= i40e_fcoe_disable,
7676 #endif
7677 	.ndo_set_features	= i40e_set_features,
7678 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
7679 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
7680 	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
7681 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
7682 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
7683 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
7684 #ifdef CONFIG_I40E_VXLAN
7685 	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
7686 	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
7687 #endif
7688 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
7689 	.ndo_fdb_add		= i40e_ndo_fdb_add,
7690 };
7691 
7692 /**
7693  * i40e_config_netdev - Setup the netdev flags
7694  * @vsi: the VSI being configured
7695  *
7696  * Returns 0 on success, negative value on failure
7697  **/
7698 static int i40e_config_netdev(struct i40e_vsi *vsi)
7699 {
7700 	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
7701 	struct i40e_pf *pf = vsi->back;
7702 	struct i40e_hw *hw = &pf->hw;
7703 	struct i40e_netdev_priv *np;
7704 	struct net_device *netdev;
7705 	u8 mac_addr[ETH_ALEN];
7706 	int etherdev_size;
7707 
7708 	etherdev_size = sizeof(struct i40e_netdev_priv);
7709 	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
7710 	if (!netdev)
7711 		return -ENOMEM;
7712 
7713 	vsi->netdev = netdev;
7714 	np = netdev_priv(netdev);
7715 	np->vsi = vsi;
7716 
7717 	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |
7718 				  NETIF_F_GSO_UDP_TUNNEL |
7719 				  NETIF_F_TSO;
7720 
7721 	netdev->features = NETIF_F_SG		       |
7722 			   NETIF_F_IP_CSUM	       |
7723 			   NETIF_F_SCTP_CSUM	       |
7724 			   NETIF_F_HIGHDMA	       |
7725 			   NETIF_F_GSO_UDP_TUNNEL      |
7726 			   NETIF_F_HW_VLAN_CTAG_TX     |
7727 			   NETIF_F_HW_VLAN_CTAG_RX     |
7728 			   NETIF_F_HW_VLAN_CTAG_FILTER |
7729 			   NETIF_F_IPV6_CSUM	       |
7730 			   NETIF_F_TSO		       |
7731 			   NETIF_F_TSO_ECN	       |
7732 			   NETIF_F_TSO6		       |
7733 			   NETIF_F_RXCSUM	       |
7734 			   NETIF_F_RXHASH	       |
7735 			   0;
7736 
7737 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
7738 		netdev->features |= NETIF_F_NTUPLE;
7739 
7740 	/* copy netdev features into list of user selectable features */
7741 	netdev->hw_features |= netdev->features;
7742 
7743 	if (vsi->type == I40E_VSI_MAIN) {
7744 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
7745 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
7746 		/* The following steps are necessary to prevent reception
7747 		 * of tagged packets - some older NVM configurations load a
7748 		 * default a MAC-VLAN filter that accepts any tagged packet
7749 		 * which must be replaced by a normal filter.
7750 		 */
7751 		if (!i40e_rm_default_mac_filter(vsi, mac_addr))
7752 			i40e_add_filter(vsi, mac_addr,
7753 					I40E_VLAN_ANY, false, true);
7754 	} else {
7755 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
7756 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
7757 			 pf->vsi[pf->lan_vsi]->netdev->name);
7758 		random_ether_addr(mac_addr);
7759 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
7760 	}
7761 	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
7762 
7763 	ether_addr_copy(netdev->dev_addr, mac_addr);
7764 	ether_addr_copy(netdev->perm_addr, mac_addr);
7765 	/* vlan gets same features (except vlan offload)
7766 	 * after any tweaks for specific VSI types
7767 	 */
7768 	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
7769 						     NETIF_F_HW_VLAN_CTAG_RX |
7770 						   NETIF_F_HW_VLAN_CTAG_FILTER);
7771 	netdev->priv_flags |= IFF_UNICAST_FLT;
7772 	netdev->priv_flags |= IFF_SUPP_NOFCS;
7773 	/* Setup netdev TC information */
7774 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
7775 
7776 	netdev->netdev_ops = &i40e_netdev_ops;
7777 	netdev->watchdog_timeo = 5 * HZ;
7778 	i40e_set_ethtool_ops(netdev);
7779 #ifdef I40E_FCOE
7780 	i40e_fcoe_config_netdev(netdev, vsi);
7781 #endif
7782 
7783 	return 0;
7784 }
7785 
7786 /**
7787  * i40e_vsi_delete - Delete a VSI from the switch
7788  * @vsi: the VSI being removed
7789  *
7790  * Returns 0 on success, negative value on failure
7791  **/
7792 static void i40e_vsi_delete(struct i40e_vsi *vsi)
7793 {
7794 	/* remove default VSI is not allowed */
7795 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
7796 		return;
7797 
7798 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
7799 }
7800 
7801 /**
7802  * i40e_add_vsi - Add a VSI to the switch
7803  * @vsi: the VSI being configured
7804  *
7805  * This initializes a VSI context depending on the VSI type to be added and
7806  * passes it down to the add_vsi aq command.
7807  **/
7808 static int i40e_add_vsi(struct i40e_vsi *vsi)
7809 {
7810 	int ret = -ENODEV;
7811 	struct i40e_mac_filter *f, *ftmp;
7812 	struct i40e_pf *pf = vsi->back;
7813 	struct i40e_hw *hw = &pf->hw;
7814 	struct i40e_vsi_context ctxt;
7815 	u8 enabled_tc = 0x1; /* TC0 enabled */
7816 	int f_count = 0;
7817 
7818 	memset(&ctxt, 0, sizeof(ctxt));
7819 	switch (vsi->type) {
7820 	case I40E_VSI_MAIN:
7821 		/* The PF's main VSI is already setup as part of the
7822 		 * device initialization, so we'll not bother with
7823 		 * the add_vsi call, but we will retrieve the current
7824 		 * VSI context.
7825 		 */
7826 		ctxt.seid = pf->main_vsi_seid;
7827 		ctxt.pf_num = pf->hw.pf_id;
7828 		ctxt.vf_num = 0;
7829 		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
7830 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
7831 		if (ret) {
7832 			dev_info(&pf->pdev->dev,
7833 				 "couldn't get pf vsi config, err %d, aq_err %d\n",
7834 				 ret, pf->hw.aq.asq_last_status);
7835 			return -ENOENT;
7836 		}
7837 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
7838 		vsi->info.valid_sections = 0;
7839 
7840 		vsi->seid = ctxt.seid;
7841 		vsi->id = ctxt.vsi_number;
7842 
7843 		enabled_tc = i40e_pf_get_tc_map(pf);
7844 
7845 		/* MFP mode setup queue map and update VSI */
7846 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
7847 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
7848 			memset(&ctxt, 0, sizeof(ctxt));
7849 			ctxt.seid = pf->main_vsi_seid;
7850 			ctxt.pf_num = pf->hw.pf_id;
7851 			ctxt.vf_num = 0;
7852 			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
7853 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7854 			if (ret) {
7855 				dev_info(&pf->pdev->dev,
7856 					 "update vsi failed, aq_err=%d\n",
7857 					 pf->hw.aq.asq_last_status);
7858 				ret = -ENOENT;
7859 				goto err;
7860 			}
7861 			/* update the local VSI info queue map */
7862 			i40e_vsi_update_queue_map(vsi, &ctxt);
7863 			vsi->info.valid_sections = 0;
7864 		} else {
7865 			/* Default/Main VSI is only enabled for TC0
7866 			 * reconfigure it to enable all TCs that are
7867 			 * available on the port in SFP mode.
7868 			 * For MFP case the iSCSI PF would use this
7869 			 * flow to enable LAN+iSCSI TC.
7870 			 */
7871 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
7872 			if (ret) {
7873 				dev_info(&pf->pdev->dev,
7874 					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
7875 					 enabled_tc, ret,
7876 					 pf->hw.aq.asq_last_status);
7877 				ret = -ENOENT;
7878 			}
7879 		}
7880 		break;
7881 
7882 	case I40E_VSI_FDIR:
7883 		ctxt.pf_num = hw->pf_id;
7884 		ctxt.vf_num = 0;
7885 		ctxt.uplink_seid = vsi->uplink_seid;
7886 		ctxt.connection_type = 0x1;     /* regular data port */
7887 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
7888 		ctxt.info.valid_sections |=
7889 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
7890 		ctxt.info.switch_id =
7891 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
7892 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7893 		break;
7894 
7895 	case I40E_VSI_VMDQ2:
7896 		ctxt.pf_num = hw->pf_id;
7897 		ctxt.vf_num = 0;
7898 		ctxt.uplink_seid = vsi->uplink_seid;
7899 		ctxt.connection_type = 0x1;     /* regular data port */
7900 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
7901 
7902 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
7903 
7904 		/* This VSI is connected to VEB so the switch_id
7905 		 * should be set to zero by default.
7906 		 */
7907 		ctxt.info.switch_id = 0;
7908 		ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
7909 
7910 		/* Setup the VSI tx/rx queue map for TC0 only for now */
7911 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7912 		break;
7913 
7914 	case I40E_VSI_SRIOV:
7915 		ctxt.pf_num = hw->pf_id;
7916 		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
7917 		ctxt.uplink_seid = vsi->uplink_seid;
7918 		ctxt.connection_type = 0x1;     /* regular data port */
7919 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
7920 
7921 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
7922 
7923 		/* This VSI is connected to VEB so the switch_id
7924 		 * should be set to zero by default.
7925 		 */
7926 		ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
7927 
7928 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
7929 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
7930 		if (pf->vf[vsi->vf_id].spoofchk) {
7931 			ctxt.info.valid_sections |=
7932 				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
7933 			ctxt.info.sec_flags |=
7934 				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
7935 				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
7936 		}
7937 		/* Setup the VSI tx/rx queue map for TC0 only for now */
7938 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7939 		break;
7940 
7941 #ifdef I40E_FCOE
7942 	case I40E_VSI_FCOE:
7943 		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
7944 		if (ret) {
7945 			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
7946 			return ret;
7947 		}
7948 		break;
7949 
7950 #endif /* I40E_FCOE */
7951 	default:
7952 		return -ENODEV;
7953 	}
7954 
7955 	if (vsi->type != I40E_VSI_MAIN) {
7956 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
7957 		if (ret) {
7958 			dev_info(&vsi->back->pdev->dev,
7959 				 "add vsi failed, aq_err=%d\n",
7960 				 vsi->back->hw.aq.asq_last_status);
7961 			ret = -ENOENT;
7962 			goto err;
7963 		}
7964 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
7965 		vsi->info.valid_sections = 0;
7966 		vsi->seid = ctxt.seid;
7967 		vsi->id = ctxt.vsi_number;
7968 	}
7969 
7970 	/* If macvlan filters already exist, force them to get loaded */
7971 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
7972 		f->changed = true;
7973 		f_count++;
7974 
7975 		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
7976 			struct i40e_aqc_remove_macvlan_element_data element;
7977 
7978 			memset(&element, 0, sizeof(element));
7979 			ether_addr_copy(element.mac_addr, f->macaddr);
7980 			element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7981 			ret = i40e_aq_remove_macvlan(hw, vsi->seid,
7982 						     &element, 1, NULL);
7983 			if (ret) {
7984 				/* some older FW has a different default */
7985 				element.flags |=
7986 					       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7987 				i40e_aq_remove_macvlan(hw, vsi->seid,
7988 						       &element, 1, NULL);
7989 			}
7990 
7991 			i40e_aq_mac_address_write(hw,
7992 						  I40E_AQC_WRITE_TYPE_LAA_WOL,
7993 						  f->macaddr, NULL);
7994 		}
7995 	}
7996 	if (f_count) {
7997 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
7998 		pf->flags |= I40E_FLAG_FILTER_SYNC;
7999 	}
8000 
8001 	/* Update VSI BW information */
8002 	ret = i40e_vsi_get_bw_info(vsi);
8003 	if (ret) {
8004 		dev_info(&pf->pdev->dev,
8005 			 "couldn't get vsi bw info, err %d, aq_err %d\n",
8006 			 ret, pf->hw.aq.asq_last_status);
8007 		/* VSI is already added so not tearing that up */
8008 		ret = 0;
8009 	}
8010 
8011 err:
8012 	return ret;
8013 }
8014 
8015 /**
8016  * i40e_vsi_release - Delete a VSI and free its resources
8017  * @vsi: the VSI being removed
8018  *
8019  * Returns 0 on success or < 0 on error
8020  **/
8021 int i40e_vsi_release(struct i40e_vsi *vsi)
8022 {
8023 	struct i40e_mac_filter *f, *ftmp;
8024 	struct i40e_veb *veb = NULL;
8025 	struct i40e_pf *pf;
8026 	u16 uplink_seid;
8027 	int i, n;
8028 
8029 	pf = vsi->back;
8030 
8031 	/* release of a VEB-owner or last VSI is not allowed */
8032 	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
8033 		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
8034 			 vsi->seid, vsi->uplink_seid);
8035 		return -ENODEV;
8036 	}
8037 	if (vsi == pf->vsi[pf->lan_vsi] &&
8038 	    !test_bit(__I40E_DOWN, &pf->state)) {
8039 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
8040 		return -ENODEV;
8041 	}
8042 
8043 	uplink_seid = vsi->uplink_seid;
8044 	if (vsi->type != I40E_VSI_SRIOV) {
8045 		if (vsi->netdev_registered) {
8046 			vsi->netdev_registered = false;
8047 			if (vsi->netdev) {
8048 				/* results in a call to i40e_close() */
8049 				unregister_netdev(vsi->netdev);
8050 			}
8051 		} else {
8052 			i40e_vsi_close(vsi);
8053 		}
8054 		i40e_vsi_disable_irq(vsi);
8055 	}
8056 
8057 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
8058 		i40e_del_filter(vsi, f->macaddr, f->vlan,
8059 				f->is_vf, f->is_netdev);
8060 	i40e_sync_vsi_filters(vsi);
8061 
8062 	i40e_vsi_delete(vsi);
8063 	i40e_vsi_free_q_vectors(vsi);
8064 	if (vsi->netdev) {
8065 		free_netdev(vsi->netdev);
8066 		vsi->netdev = NULL;
8067 	}
8068 	i40e_vsi_clear_rings(vsi);
8069 	i40e_vsi_clear(vsi);
8070 
8071 	/* If this was the last thing on the VEB, except for the
8072 	 * controlling VSI, remove the VEB, which puts the controlling
8073 	 * VSI onto the next level down in the switch.
8074 	 *
8075 	 * Well, okay, there's one more exception here: don't remove
8076 	 * the orphan VEBs yet.  We'll wait for an explicit remove request
8077 	 * from up the network stack.
8078 	 */
8079 	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
8080 		if (pf->vsi[i] &&
8081 		    pf->vsi[i]->uplink_seid == uplink_seid &&
8082 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8083 			n++;      /* count the VSIs */
8084 		}
8085 	}
8086 	for (i = 0; i < I40E_MAX_VEB; i++) {
8087 		if (!pf->veb[i])
8088 			continue;
8089 		if (pf->veb[i]->uplink_seid == uplink_seid)
8090 			n++;     /* count the VEBs */
8091 		if (pf->veb[i]->seid == uplink_seid)
8092 			veb = pf->veb[i];
8093 	}
8094 	if (n == 0 && veb && veb->uplink_seid != 0)
8095 		i40e_veb_release(veb);
8096 
8097 	return 0;
8098 }
8099 
8100 /**
8101  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8102  * @vsi: ptr to the VSI
8103  *
8104  * This should only be called after i40e_vsi_mem_alloc() which allocates the
8105  * corresponding SW VSI structure and initializes num_queue_pairs for the
8106  * newly allocated VSI.
8107  *
8108  * Returns 0 on success or negative on failure
8109  **/
8110 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8111 {
8112 	int ret = -ENOENT;
8113 	struct i40e_pf *pf = vsi->back;
8114 
8115 	if (vsi->q_vectors[0]) {
8116 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8117 			 vsi->seid);
8118 		return -EEXIST;
8119 	}
8120 
8121 	if (vsi->base_vector) {
8122 		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
8123 			 vsi->seid, vsi->base_vector);
8124 		return -EEXIST;
8125 	}
8126 
8127 	ret = i40e_vsi_alloc_q_vectors(vsi);
8128 	if (ret) {
8129 		dev_info(&pf->pdev->dev,
8130 			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8131 			 vsi->num_q_vectors, vsi->seid, ret);
8132 		vsi->num_q_vectors = 0;
8133 		goto vector_setup_out;
8134 	}
8135 
8136 	if (vsi->num_q_vectors)
8137 		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
8138 						 vsi->num_q_vectors, vsi->idx);
8139 	if (vsi->base_vector < 0) {
8140 		dev_info(&pf->pdev->dev,
8141 			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
8142 			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
8143 		i40e_vsi_free_q_vectors(vsi);
8144 		ret = -ENOENT;
8145 		goto vector_setup_out;
8146 	}
8147 
8148 vector_setup_out:
8149 	return ret;
8150 }
8151 
8152 /**
8153  * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
8154  * @vsi: pointer to the vsi.
8155  *
8156  * This re-allocates a vsi's queue resources.
8157  *
8158  * Returns pointer to the successfully allocated and configured VSI sw struct
8159  * on success, otherwise returns NULL on failure.
8160  **/
8161 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
8162 {
8163 	struct i40e_pf *pf = vsi->back;
8164 	u8 enabled_tc;
8165 	int ret;
8166 
8167 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
8168 	i40e_vsi_clear_rings(vsi);
8169 
8170 	i40e_vsi_free_arrays(vsi, false);
8171 	i40e_set_num_rings_in_vsi(vsi);
8172 	ret = i40e_vsi_alloc_arrays(vsi, false);
8173 	if (ret)
8174 		goto err_vsi;
8175 
8176 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
8177 	if (ret < 0) {
8178 		dev_info(&pf->pdev->dev,
8179 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
8180 			 vsi->alloc_queue_pairs, vsi->seid, ret);
8181 		goto err_vsi;
8182 	}
8183 	vsi->base_queue = ret;
8184 
8185 	/* Update the FW view of the VSI. Force a reset of TC and queue
8186 	 * layout configurations.
8187 	 */
8188 	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8189 	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8190 	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8191 	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8192 
8193 	/* assign it some queues */
8194 	ret = i40e_alloc_rings(vsi);
8195 	if (ret)
8196 		goto err_rings;
8197 
8198 	/* map all of the rings to the q_vectors */
8199 	i40e_vsi_map_rings_to_vectors(vsi);
8200 	return vsi;
8201 
8202 err_rings:
8203 	i40e_vsi_free_q_vectors(vsi);
8204 	if (vsi->netdev_registered) {
8205 		vsi->netdev_registered = false;
8206 		unregister_netdev(vsi->netdev);
8207 		free_netdev(vsi->netdev);
8208 		vsi->netdev = NULL;
8209 	}
8210 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8211 err_vsi:
8212 	i40e_vsi_clear(vsi);
8213 	return NULL;
8214 }
8215 
8216 /**
8217  * i40e_vsi_setup - Set up a VSI by a given type
8218  * @pf: board private structure
8219  * @type: VSI type
8220  * @uplink_seid: the switch element to link to
8221  * @param1: usage depends upon VSI type. For VF types, indicates VF id
8222  *
8223  * This allocates the sw VSI structure and its queue resources, then add a VSI
8224  * to the identified VEB.
8225  *
8226  * Returns pointer to the successfully allocated and configure VSI sw struct on
8227  * success, otherwise returns NULL on failure.
8228  **/
8229 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
8230 				u16 uplink_seid, u32 param1)
8231 {
8232 	struct i40e_vsi *vsi = NULL;
8233 	struct i40e_veb *veb = NULL;
8234 	int ret, i;
8235 	int v_idx;
8236 
8237 	/* The requested uplink_seid must be either
8238 	 *     - the PF's port seid
8239 	 *              no VEB is needed because this is the PF
8240 	 *              or this is a Flow Director special case VSI
8241 	 *     - seid of an existing VEB
8242 	 *     - seid of a VSI that owns an existing VEB
8243 	 *     - seid of a VSI that doesn't own a VEB
8244 	 *              a new VEB is created and the VSI becomes the owner
8245 	 *     - seid of the PF VSI, which is what creates the first VEB
8246 	 *              this is a special case of the previous
8247 	 *
8248 	 * Find which uplink_seid we were given and create a new VEB if needed
8249 	 */
8250 	for (i = 0; i < I40E_MAX_VEB; i++) {
8251 		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
8252 			veb = pf->veb[i];
8253 			break;
8254 		}
8255 	}
8256 
8257 	if (!veb && uplink_seid != pf->mac_seid) {
8258 
8259 		for (i = 0; i < pf->num_alloc_vsi; i++) {
8260 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
8261 				vsi = pf->vsi[i];
8262 				break;
8263 			}
8264 		}
8265 		if (!vsi) {
8266 			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
8267 				 uplink_seid);
8268 			return NULL;
8269 		}
8270 
8271 		if (vsi->uplink_seid == pf->mac_seid)
8272 			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
8273 					     vsi->tc_config.enabled_tc);
8274 		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
8275 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8276 					     vsi->tc_config.enabled_tc);
8277 		if (veb) {
8278 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
8279 				dev_info(&vsi->back->pdev->dev,
8280 					 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
8281 					 __func__);
8282 				return NULL;
8283 			}
8284 			i40e_enable_pf_switch_lb(pf);
8285 		}
8286 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8287 			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8288 				veb = pf->veb[i];
8289 		}
8290 		if (!veb) {
8291 			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
8292 			return NULL;
8293 		}
8294 
8295 		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
8296 		uplink_seid = veb->seid;
8297 	}
8298 
8299 	/* get vsi sw struct */
8300 	v_idx = i40e_vsi_mem_alloc(pf, type);
8301 	if (v_idx < 0)
8302 		goto err_alloc;
8303 	vsi = pf->vsi[v_idx];
8304 	if (!vsi)
8305 		goto err_alloc;
8306 	vsi->type = type;
8307 	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
8308 
8309 	if (type == I40E_VSI_MAIN)
8310 		pf->lan_vsi = v_idx;
8311 	else if (type == I40E_VSI_SRIOV)
8312 		vsi->vf_id = param1;
8313 	/* assign it some queues */
8314 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
8315 				vsi->idx);
8316 	if (ret < 0) {
8317 		dev_info(&pf->pdev->dev,
8318 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
8319 			 vsi->alloc_queue_pairs, vsi->seid, ret);
8320 		goto err_vsi;
8321 	}
8322 	vsi->base_queue = ret;
8323 
8324 	/* get a VSI from the hardware */
8325 	vsi->uplink_seid = uplink_seid;
8326 	ret = i40e_add_vsi(vsi);
8327 	if (ret)
8328 		goto err_vsi;
8329 
8330 	switch (vsi->type) {
8331 	/* setup the netdev if needed */
8332 	case I40E_VSI_MAIN:
8333 	case I40E_VSI_VMDQ2:
8334 	case I40E_VSI_FCOE:
8335 		ret = i40e_config_netdev(vsi);
8336 		if (ret)
8337 			goto err_netdev;
8338 		ret = register_netdev(vsi->netdev);
8339 		if (ret)
8340 			goto err_netdev;
8341 		vsi->netdev_registered = true;
8342 		netif_carrier_off(vsi->netdev);
8343 #ifdef CONFIG_I40E_DCB
8344 		/* Setup DCB netlink interface */
8345 		i40e_dcbnl_setup(vsi);
8346 #endif /* CONFIG_I40E_DCB */
8347 		/* fall through */
8348 
8349 	case I40E_VSI_FDIR:
8350 		/* set up vectors and rings if needed */
8351 		ret = i40e_vsi_setup_vectors(vsi);
8352 		if (ret)
8353 			goto err_msix;
8354 
8355 		ret = i40e_alloc_rings(vsi);
8356 		if (ret)
8357 			goto err_rings;
8358 
8359 		/* map all of the rings to the q_vectors */
8360 		i40e_vsi_map_rings_to_vectors(vsi);
8361 
8362 		i40e_vsi_reset_stats(vsi);
8363 		break;
8364 
8365 	default:
8366 		/* no netdev or rings for the other VSI types */
8367 		break;
8368 	}
8369 
8370 	return vsi;
8371 
8372 err_rings:
8373 	i40e_vsi_free_q_vectors(vsi);
8374 err_msix:
8375 	if (vsi->netdev_registered) {
8376 		vsi->netdev_registered = false;
8377 		unregister_netdev(vsi->netdev);
8378 		free_netdev(vsi->netdev);
8379 		vsi->netdev = NULL;
8380 	}
8381 err_netdev:
8382 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8383 err_vsi:
8384 	i40e_vsi_clear(vsi);
8385 err_alloc:
8386 	return NULL;
8387 }
8388 
8389 /**
8390  * i40e_veb_get_bw_info - Query VEB BW information
8391  * @veb: the veb to query
8392  *
8393  * Query the Tx scheduler BW configuration data for given VEB
8394  **/
8395 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
8396 {
8397 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
8398 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
8399 	struct i40e_pf *pf = veb->pf;
8400 	struct i40e_hw *hw = &pf->hw;
8401 	u32 tc_bw_max;
8402 	int ret = 0;
8403 	int i;
8404 
8405 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
8406 						  &bw_data, NULL);
8407 	if (ret) {
8408 		dev_info(&pf->pdev->dev,
8409 			 "query veb bw config failed, aq_err=%d\n",
8410 			 hw->aq.asq_last_status);
8411 		goto out;
8412 	}
8413 
8414 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
8415 						   &ets_data, NULL);
8416 	if (ret) {
8417 		dev_info(&pf->pdev->dev,
8418 			 "query veb bw ets config failed, aq_err=%d\n",
8419 			 hw->aq.asq_last_status);
8420 		goto out;
8421 	}
8422 
8423 	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
8424 	veb->bw_max_quanta = ets_data.tc_bw_max;
8425 	veb->is_abs_credits = bw_data.absolute_credits_enable;
8426 	veb->enabled_tc = ets_data.tc_valid_bits;
8427 	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
8428 		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
8429 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8430 		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
8431 		veb->bw_tc_limit_credits[i] =
8432 					le16_to_cpu(bw_data.tc_bw_limits[i]);
8433 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
8434 	}
8435 
8436 out:
8437 	return ret;
8438 }
8439 
8440 /**
8441  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
8442  * @pf: board private structure
8443  *
8444  * On error: returns error code (negative)
8445  * On success: returns vsi index in PF (positive)
8446  **/
8447 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
8448 {
8449 	int ret = -ENOENT;
8450 	struct i40e_veb *veb;
8451 	int i;
8452 
8453 	/* Need to protect the allocation of switch elements at the PF level */
8454 	mutex_lock(&pf->switch_mutex);
8455 
8456 	/* VEB list may be fragmented if VEB creation/destruction has
8457 	 * been happening.  We can afford to do a quick scan to look
8458 	 * for any free slots in the list.
8459 	 *
8460 	 * find next empty veb slot, looping back around if necessary
8461 	 */
8462 	i = 0;
8463 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
8464 		i++;
8465 	if (i >= I40E_MAX_VEB) {
8466 		ret = -ENOMEM;
8467 		goto err_alloc_veb;  /* out of VEB slots! */
8468 	}
8469 
8470 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
8471 	if (!veb) {
8472 		ret = -ENOMEM;
8473 		goto err_alloc_veb;
8474 	}
8475 	veb->pf = pf;
8476 	veb->idx = i;
8477 	veb->enabled_tc = 1;
8478 
8479 	pf->veb[i] = veb;
8480 	ret = i;
8481 err_alloc_veb:
8482 	mutex_unlock(&pf->switch_mutex);
8483 	return ret;
8484 }
8485 
8486 /**
8487  * i40e_switch_branch_release - Delete a branch of the switch tree
8488  * @branch: where to start deleting
8489  *
8490  * This uses recursion to find the tips of the branch to be
8491  * removed, deleting until we get back to and can delete this VEB.
8492  **/
8493 static void i40e_switch_branch_release(struct i40e_veb *branch)
8494 {
8495 	struct i40e_pf *pf = branch->pf;
8496 	u16 branch_seid = branch->seid;
8497 	u16 veb_idx = branch->idx;
8498 	int i;
8499 
8500 	/* release any VEBs on this VEB - RECURSION */
8501 	for (i = 0; i < I40E_MAX_VEB; i++) {
8502 		if (!pf->veb[i])
8503 			continue;
8504 		if (pf->veb[i]->uplink_seid == branch->seid)
8505 			i40e_switch_branch_release(pf->veb[i]);
8506 	}
8507 
8508 	/* Release the VSIs on this VEB, but not the owner VSI.
8509 	 *
8510 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
8511 	 *       the VEB itself, so don't use (*branch) after this loop.
8512 	 */
8513 	for (i = 0; i < pf->num_alloc_vsi; i++) {
8514 		if (!pf->vsi[i])
8515 			continue;
8516 		if (pf->vsi[i]->uplink_seid == branch_seid &&
8517 		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8518 			i40e_vsi_release(pf->vsi[i]);
8519 		}
8520 	}
8521 
8522 	/* There's one corner case where the VEB might not have been
8523 	 * removed, so double check it here and remove it if needed.
8524 	 * This case happens if the veb was created from the debugfs
8525 	 * commands and no VSIs were added to it.
8526 	 */
8527 	if (pf->veb[veb_idx])
8528 		i40e_veb_release(pf->veb[veb_idx]);
8529 }
8530 
8531 /**
8532  * i40e_veb_clear - remove veb struct
8533  * @veb: the veb to remove
8534  **/
8535 static void i40e_veb_clear(struct i40e_veb *veb)
8536 {
8537 	if (!veb)
8538 		return;
8539 
8540 	if (veb->pf) {
8541 		struct i40e_pf *pf = veb->pf;
8542 
8543 		mutex_lock(&pf->switch_mutex);
8544 		if (pf->veb[veb->idx] == veb)
8545 			pf->veb[veb->idx] = NULL;
8546 		mutex_unlock(&pf->switch_mutex);
8547 	}
8548 
8549 	kfree(veb);
8550 }
8551 
8552 /**
8553  * i40e_veb_release - Delete a VEB and free its resources
8554  * @veb: the VEB being removed
8555  **/
8556 void i40e_veb_release(struct i40e_veb *veb)
8557 {
8558 	struct i40e_vsi *vsi = NULL;
8559 	struct i40e_pf *pf;
8560 	int i, n = 0;
8561 
8562 	pf = veb->pf;
8563 
8564 	/* find the remaining VSI and check for extras */
8565 	for (i = 0; i < pf->num_alloc_vsi; i++) {
8566 		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
8567 			n++;
8568 			vsi = pf->vsi[i];
8569 		}
8570 	}
8571 	if (n != 1) {
8572 		dev_info(&pf->pdev->dev,
8573 			 "can't remove VEB %d with %d VSIs left\n",
8574 			 veb->seid, n);
8575 		return;
8576 	}
8577 
8578 	/* move the remaining VSI to uplink veb */
8579 	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
8580 	if (veb->uplink_seid) {
8581 		vsi->uplink_seid = veb->uplink_seid;
8582 		if (veb->uplink_seid == pf->mac_seid)
8583 			vsi->veb_idx = I40E_NO_VEB;
8584 		else
8585 			vsi->veb_idx = veb->veb_idx;
8586 	} else {
8587 		/* floating VEB */
8588 		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8589 		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
8590 	}
8591 
8592 	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
8593 	i40e_veb_clear(veb);
8594 }
8595 
8596 /**
8597  * i40e_add_veb - create the VEB in the switch
8598  * @veb: the VEB to be instantiated
8599  * @vsi: the controlling VSI
8600  **/
8601 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
8602 {
8603 	bool is_default = false;
8604 	bool is_cloud = false;
8605 	int ret;
8606 
8607 	/* get a VEB from the hardware */
8608 	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
8609 			      veb->enabled_tc, is_default,
8610 			      is_cloud, &veb->seid, NULL);
8611 	if (ret) {
8612 		dev_info(&veb->pf->pdev->dev,
8613 			 "couldn't add VEB, err %d, aq_err %d\n",
8614 			 ret, veb->pf->hw.aq.asq_last_status);
8615 		return -EPERM;
8616 	}
8617 
8618 	/* get statistics counter */
8619 	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
8620 					 &veb->stats_idx, NULL, NULL, NULL);
8621 	if (ret) {
8622 		dev_info(&veb->pf->pdev->dev,
8623 			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
8624 			 ret, veb->pf->hw.aq.asq_last_status);
8625 		return -EPERM;
8626 	}
8627 	ret = i40e_veb_get_bw_info(veb);
8628 	if (ret) {
8629 		dev_info(&veb->pf->pdev->dev,
8630 			 "couldn't get VEB bw info, err %d, aq_err %d\n",
8631 			 ret, veb->pf->hw.aq.asq_last_status);
8632 		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
8633 		return -ENOENT;
8634 	}
8635 
8636 	vsi->uplink_seid = veb->seid;
8637 	vsi->veb_idx = veb->idx;
8638 	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
8639 
8640 	return 0;
8641 }
8642 
8643 /**
8644  * i40e_veb_setup - Set up a VEB
8645  * @pf: board private structure
8646  * @flags: VEB setup flags
8647  * @uplink_seid: the switch element to link to
8648  * @vsi_seid: the initial VSI seid
8649  * @enabled_tc: Enabled TC bit-map
8650  *
8651  * This allocates the sw VEB structure and links it into the switch
8652  * It is possible and legal for this to be a duplicate of an already
8653  * existing VEB.  It is also possible for both uplink and vsi seids
8654  * to be zero, in order to create a floating VEB.
8655  *
8656  * Returns pointer to the successfully allocated VEB sw struct on
8657  * success, otherwise returns NULL on failure.
8658  **/
8659 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
8660 				u16 uplink_seid, u16 vsi_seid,
8661 				u8 enabled_tc)
8662 {
8663 	struct i40e_veb *veb, *uplink_veb = NULL;
8664 	int vsi_idx, veb_idx;
8665 	int ret;
8666 
8667 	/* if one seid is 0, the other must be 0 to create a floating relay */
8668 	if ((uplink_seid == 0 || vsi_seid == 0) &&
8669 	    (uplink_seid + vsi_seid != 0)) {
8670 		dev_info(&pf->pdev->dev,
8671 			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
8672 			 uplink_seid, vsi_seid);
8673 		return NULL;
8674 	}
8675 
8676 	/* make sure there is such a vsi and uplink */
8677 	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
8678 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
8679 			break;
8680 	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
8681 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
8682 			 vsi_seid);
8683 		return NULL;
8684 	}
8685 
8686 	if (uplink_seid && uplink_seid != pf->mac_seid) {
8687 		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
8688 			if (pf->veb[veb_idx] &&
8689 			    pf->veb[veb_idx]->seid == uplink_seid) {
8690 				uplink_veb = pf->veb[veb_idx];
8691 				break;
8692 			}
8693 		}
8694 		if (!uplink_veb) {
8695 			dev_info(&pf->pdev->dev,
8696 				 "uplink seid %d not found\n", uplink_seid);
8697 			return NULL;
8698 		}
8699 	}
8700 
8701 	/* get veb sw struct */
8702 	veb_idx = i40e_veb_mem_alloc(pf);
8703 	if (veb_idx < 0)
8704 		goto err_alloc;
8705 	veb = pf->veb[veb_idx];
8706 	veb->flags = flags;
8707 	veb->uplink_seid = uplink_seid;
8708 	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
8709 	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
8710 
8711 	/* create the VEB in the switch */
8712 	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
8713 	if (ret)
8714 		goto err_veb;
8715 	if (vsi_idx == pf->lan_vsi)
8716 		pf->lan_veb = veb->idx;
8717 
8718 	return veb;
8719 
8720 err_veb:
8721 	i40e_veb_clear(veb);
8722 err_alloc:
8723 	return NULL;
8724 }
8725 
8726 /**
8727  * i40e_setup_pf_switch_element - set pf vars based on switch type
8728  * @pf: board private structure
8729  * @ele: element we are building info from
8730  * @num_reported: total number of elements
8731  * @printconfig: should we print the contents
8732  *
8733  * helper function to assist in extracting a few useful SEID values.
8734  **/
8735 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
8736 				struct i40e_aqc_switch_config_element_resp *ele,
8737 				u16 num_reported, bool printconfig)
8738 {
8739 	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
8740 	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
8741 	u8 element_type = ele->element_type;
8742 	u16 seid = le16_to_cpu(ele->seid);
8743 
8744 	if (printconfig)
8745 		dev_info(&pf->pdev->dev,
8746 			 "type=%d seid=%d uplink=%d downlink=%d\n",
8747 			 element_type, seid, uplink_seid, downlink_seid);
8748 
8749 	switch (element_type) {
8750 	case I40E_SWITCH_ELEMENT_TYPE_MAC:
8751 		pf->mac_seid = seid;
8752 		break;
8753 	case I40E_SWITCH_ELEMENT_TYPE_VEB:
8754 		/* Main VEB? */
8755 		if (uplink_seid != pf->mac_seid)
8756 			break;
8757 		if (pf->lan_veb == I40E_NO_VEB) {
8758 			int v;
8759 
8760 			/* find existing or else empty VEB */
8761 			for (v = 0; v < I40E_MAX_VEB; v++) {
8762 				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
8763 					pf->lan_veb = v;
8764 					break;
8765 				}
8766 			}
8767 			if (pf->lan_veb == I40E_NO_VEB) {
8768 				v = i40e_veb_mem_alloc(pf);
8769 				if (v < 0)
8770 					break;
8771 				pf->lan_veb = v;
8772 			}
8773 		}
8774 
8775 		pf->veb[pf->lan_veb]->seid = seid;
8776 		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
8777 		pf->veb[pf->lan_veb]->pf = pf;
8778 		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
8779 		break;
8780 	case I40E_SWITCH_ELEMENT_TYPE_VSI:
8781 		if (num_reported != 1)
8782 			break;
8783 		/* This is immediately after a reset so we can assume this is
8784 		 * the PF's VSI
8785 		 */
8786 		pf->mac_seid = uplink_seid;
8787 		pf->pf_seid = downlink_seid;
8788 		pf->main_vsi_seid = seid;
8789 		if (printconfig)
8790 			dev_info(&pf->pdev->dev,
8791 				 "pf_seid=%d main_vsi_seid=%d\n",
8792 				 pf->pf_seid, pf->main_vsi_seid);
8793 		break;
8794 	case I40E_SWITCH_ELEMENT_TYPE_PF:
8795 	case I40E_SWITCH_ELEMENT_TYPE_VF:
8796 	case I40E_SWITCH_ELEMENT_TYPE_EMP:
8797 	case I40E_SWITCH_ELEMENT_TYPE_BMC:
8798 	case I40E_SWITCH_ELEMENT_TYPE_PE:
8799 	case I40E_SWITCH_ELEMENT_TYPE_PA:
8800 		/* ignore these for now */
8801 		break;
8802 	default:
8803 		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
8804 			 element_type, seid);
8805 		break;
8806 	}
8807 }
8808 
8809 /**
8810  * i40e_fetch_switch_configuration - Get switch config from firmware
8811  * @pf: board private structure
8812  * @printconfig: should we print the contents
8813  *
8814  * Get the current switch configuration from the device and
8815  * extract a few useful SEID values.
8816  **/
8817 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
8818 {
8819 	struct i40e_aqc_get_switch_config_resp *sw_config;
8820 	u16 next_seid = 0;
8821 	int ret = 0;
8822 	u8 *aq_buf;
8823 	int i;
8824 
8825 	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
8826 	if (!aq_buf)
8827 		return -ENOMEM;
8828 
8829 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
8830 	do {
8831 		u16 num_reported, num_total;
8832 
8833 		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
8834 						I40E_AQ_LARGE_BUF,
8835 						&next_seid, NULL);
8836 		if (ret) {
8837 			dev_info(&pf->pdev->dev,
8838 				 "get switch config failed %d aq_err=%x\n",
8839 				 ret, pf->hw.aq.asq_last_status);
8840 			kfree(aq_buf);
8841 			return -ENOENT;
8842 		}
8843 
8844 		num_reported = le16_to_cpu(sw_config->header.num_reported);
8845 		num_total = le16_to_cpu(sw_config->header.num_total);
8846 
8847 		if (printconfig)
8848 			dev_info(&pf->pdev->dev,
8849 				 "header: %d reported %d total\n",
8850 				 num_reported, num_total);
8851 
8852 		for (i = 0; i < num_reported; i++) {
8853 			struct i40e_aqc_switch_config_element_resp *ele =
8854 				&sw_config->element[i];
8855 
8856 			i40e_setup_pf_switch_element(pf, ele, num_reported,
8857 						     printconfig);
8858 		}
8859 	} while (next_seid != 0);
8860 
8861 	kfree(aq_buf);
8862 	return ret;
8863 }
8864 
8865 /**
8866  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
8867  * @pf: board private structure
8868  * @reinit: if the Main VSI needs to re-initialized.
8869  *
8870  * Returns 0 on success, negative value on failure
8871  **/
8872 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
8873 {
8874 	int ret;
8875 
8876 	/* find out what's out there already */
8877 	ret = i40e_fetch_switch_configuration(pf, false);
8878 	if (ret) {
8879 		dev_info(&pf->pdev->dev,
8880 			 "couldn't fetch switch config, err %d, aq_err %d\n",
8881 			 ret, pf->hw.aq.asq_last_status);
8882 		return ret;
8883 	}
8884 	i40e_pf_reset_stats(pf);
8885 
8886 	/* first time setup */
8887 	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
8888 		struct i40e_vsi *vsi = NULL;
8889 		u16 uplink_seid;
8890 
8891 		/* Set up the PF VSI associated with the PF's main VSI
8892 		 * that is already in the HW switch
8893 		 */
8894 		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8895 			uplink_seid = pf->veb[pf->lan_veb]->seid;
8896 		else
8897 			uplink_seid = pf->mac_seid;
8898 		if (pf->lan_vsi == I40E_NO_VSI)
8899 			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
8900 		else if (reinit)
8901 			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
8902 		if (!vsi) {
8903 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
8904 			i40e_fdir_teardown(pf);
8905 			return -EAGAIN;
8906 		}
8907 	} else {
8908 		/* force a reset of TC and queue layout configurations */
8909 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8910 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8911 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8912 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8913 	}
8914 	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
8915 
8916 	i40e_fdir_sb_setup(pf);
8917 
8918 	/* Setup static PF queue filter control settings */
8919 	ret = i40e_setup_pf_filter_control(pf);
8920 	if (ret) {
8921 		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
8922 			 ret);
8923 		/* Failure here should not stop continuing other steps */
8924 	}
8925 
8926 	/* enable RSS in the HW, even for only one queue, as the stack can use
8927 	 * the hash
8928 	 */
8929 	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
8930 		i40e_config_rss(pf);
8931 
8932 	/* fill in link information and enable LSE reporting */
8933 	i40e_update_link_info(&pf->hw, true);
8934 	i40e_link_event(pf);
8935 
8936 	/* Initialize user-specific link properties */
8937 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
8938 				  I40E_AQ_AN_COMPLETED) ? true : false);
8939 
8940 	/* fill in link information and enable LSE reporting */
8941 	i40e_update_link_info(&pf->hw, true);
8942 	i40e_link_event(pf);
8943 
8944 	/* Initialize user-specific link properties */
8945 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
8946 				  I40E_AQ_AN_COMPLETED) ? true : false);
8947 
8948 	i40e_ptp_init(pf);
8949 
8950 	return ret;
8951 }
8952 
8953 /**
8954  * i40e_determine_queue_usage - Work out queue distribution
8955  * @pf: board private structure
8956  **/
8957 static void i40e_determine_queue_usage(struct i40e_pf *pf)
8958 {
8959 	int queues_left;
8960 
8961 	pf->num_lan_qps = 0;
8962 #ifdef I40E_FCOE
8963 	pf->num_fcoe_qps = 0;
8964 #endif
8965 
8966 	/* Find the max queues to be put into basic use.  We'll always be
8967 	 * using TC0, whether or not DCB is running, and TC0 will get the
8968 	 * big RSS set.
8969 	 */
8970 	queues_left = pf->hw.func_caps.num_tx_qp;
8971 
8972 	if ((queues_left == 1) ||
8973 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
8974 		/* one qp for PF, no queues for anything else */
8975 		queues_left = 0;
8976 		pf->rss_size = pf->num_lan_qps = 1;
8977 
8978 		/* make sure all the fancies are disabled */
8979 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
8980 #ifdef I40E_FCOE
8981 			       I40E_FLAG_FCOE_ENABLED	|
8982 #endif
8983 			       I40E_FLAG_FD_SB_ENABLED	|
8984 			       I40E_FLAG_FD_ATR_ENABLED	|
8985 			       I40E_FLAG_DCB_CAPABLE	|
8986 			       I40E_FLAG_SRIOV_ENABLED	|
8987 			       I40E_FLAG_VMDQ_ENABLED);
8988 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
8989 				  I40E_FLAG_FD_SB_ENABLED |
8990 				  I40E_FLAG_FD_ATR_ENABLED |
8991 				  I40E_FLAG_DCB_CAPABLE))) {
8992 		/* one qp for PF */
8993 		pf->rss_size = pf->num_lan_qps = 1;
8994 		queues_left -= pf->num_lan_qps;
8995 
8996 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
8997 #ifdef I40E_FCOE
8998 			       I40E_FLAG_FCOE_ENABLED	|
8999 #endif
9000 			       I40E_FLAG_FD_SB_ENABLED	|
9001 			       I40E_FLAG_FD_ATR_ENABLED	|
9002 			       I40E_FLAG_DCB_ENABLED	|
9003 			       I40E_FLAG_VMDQ_ENABLED);
9004 	} else {
9005 		/* Not enough queues for all TCs */
9006 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
9007 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
9008 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9009 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
9010 		}
9011 		pf->num_lan_qps = pf->rss_size_max;
9012 		queues_left -= pf->num_lan_qps;
9013 	}
9014 
9015 #ifdef I40E_FCOE
9016 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9017 		if (I40E_DEFAULT_FCOE <= queues_left) {
9018 			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9019 		} else if (I40E_MINIMUM_FCOE <= queues_left) {
9020 			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9021 		} else {
9022 			pf->num_fcoe_qps = 0;
9023 			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9024 			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9025 		}
9026 
9027 		queues_left -= pf->num_fcoe_qps;
9028 	}
9029 
9030 #endif
9031 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9032 		if (queues_left > 1) {
9033 			queues_left -= 1; /* save 1 queue for FD */
9034 		} else {
9035 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9036 			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9037 		}
9038 	}
9039 
9040 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9041 	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
9042 		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9043 					(queues_left / pf->num_vf_qps));
9044 		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9045 	}
9046 
9047 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9048 	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9049 		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9050 					  (queues_left / pf->num_vmdq_qps));
9051 		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9052 	}
9053 
9054 	pf->queues_left = queues_left;
9055 #ifdef I40E_FCOE
9056 	dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9057 #endif
9058 }
9059 
9060 /**
9061  * i40e_setup_pf_filter_control - Setup PF static filter control
9062  * @pf: PF to be setup
9063  *
9064  * i40e_setup_pf_filter_control sets up a pf's initial filter control
9065  * settings. If PE/FCoE are enabled then it will also set the per PF
9066  * based filter sizes required for them. It also enables Flow director,
9067  * ethertype and macvlan type filter settings for the pf.
9068  *
9069  * Returns 0 on success, negative on failure
9070  **/
9071 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9072 {
9073 	struct i40e_filter_control_settings *settings = &pf->filter_settings;
9074 
9075 	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9076 
9077 	/* Flow Director is enabled */
9078 	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9079 		settings->enable_fdir = true;
9080 
9081 	/* Ethtype and MACVLAN filters enabled for PF */
9082 	settings->enable_ethtype = true;
9083 	settings->enable_macvlan = true;
9084 
9085 	if (i40e_set_filter_control(&pf->hw, settings))
9086 		return -ENOENT;
9087 
9088 	return 0;
9089 }
9090 
9091 #define INFO_STRING_LEN 255
9092 static void i40e_print_features(struct i40e_pf *pf)
9093 {
9094 	struct i40e_hw *hw = &pf->hw;
9095 	char *buf, *string;
9096 
9097 	string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9098 	if (!string) {
9099 		dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9100 		return;
9101 	}
9102 
9103 	buf = string;
9104 
9105 	buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
9106 #ifdef CONFIG_PCI_IOV
9107 	buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9108 #endif
9109 	buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
9110 		       pf->vsi[pf->lan_vsi]->num_queue_pairs);
9111 
9112 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
9113 		buf += sprintf(buf, "RSS ");
9114 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9115 		buf += sprintf(buf, "FD_ATR ");
9116 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9117 		buf += sprintf(buf, "FD_SB ");
9118 		buf += sprintf(buf, "NTUPLE ");
9119 	}
9120 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9121 		buf += sprintf(buf, "DCB ");
9122 	if (pf->flags & I40E_FLAG_PTP)
9123 		buf += sprintf(buf, "PTP ");
9124 #ifdef I40E_FCOE
9125 	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9126 		buf += sprintf(buf, "FCOE ");
9127 #endif
9128 
9129 	BUG_ON(buf > (string + INFO_STRING_LEN));
9130 	dev_info(&pf->pdev->dev, "%s\n", string);
9131 	kfree(string);
9132 }
9133 
9134 /**
9135  * i40e_probe - Device initialization routine
9136  * @pdev: PCI device information struct
9137  * @ent: entry in i40e_pci_tbl
9138  *
9139  * i40e_probe initializes a pf identified by a pci_dev structure.
9140  * The OS initialization, configuring of the pf private structure,
9141  * and a hardware reset occur.
9142  *
9143  * Returns 0 on success, negative on failure
9144  **/
9145 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9146 {
9147 	struct i40e_pf *pf;
9148 	struct i40e_hw *hw;
9149 	static u16 pfs_found;
9150 	u16 link_status;
9151 	int err = 0;
9152 	u32 len;
9153 	u32 i;
9154 
9155 	err = pci_enable_device_mem(pdev);
9156 	if (err)
9157 		return err;
9158 
9159 	/* set up for high or low dma */
9160 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9161 	if (err) {
9162 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9163 		if (err) {
9164 			dev_err(&pdev->dev,
9165 				"DMA configuration failed: 0x%x\n", err);
9166 			goto err_dma;
9167 		}
9168 	}
9169 
9170 	/* set up pci connections */
9171 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9172 					   IORESOURCE_MEM), i40e_driver_name);
9173 	if (err) {
9174 		dev_info(&pdev->dev,
9175 			 "pci_request_selected_regions failed %d\n", err);
9176 		goto err_pci_reg;
9177 	}
9178 
9179 	pci_enable_pcie_error_reporting(pdev);
9180 	pci_set_master(pdev);
9181 
9182 	/* Now that we have a PCI connection, we need to do the
9183 	 * low level device setup.  This is primarily setting up
9184 	 * the Admin Queue structures and then querying for the
9185 	 * device's current profile information.
9186 	 */
9187 	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9188 	if (!pf) {
9189 		err = -ENOMEM;
9190 		goto err_pf_alloc;
9191 	}
9192 	pf->next_vsi = 0;
9193 	pf->pdev = pdev;
9194 	set_bit(__I40E_DOWN, &pf->state);
9195 
9196 	hw = &pf->hw;
9197 	hw->back = pf;
9198 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
9199 			      pci_resource_len(pdev, 0));
9200 	if (!hw->hw_addr) {
9201 		err = -EIO;
9202 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
9203 			 (unsigned int)pci_resource_start(pdev, 0),
9204 			 (unsigned int)pci_resource_len(pdev, 0), err);
9205 		goto err_ioremap;
9206 	}
9207 	hw->vendor_id = pdev->vendor;
9208 	hw->device_id = pdev->device;
9209 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
9210 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
9211 	hw->subsystem_device_id = pdev->subsystem_device;
9212 	hw->bus.device = PCI_SLOT(pdev->devfn);
9213 	hw->bus.func = PCI_FUNC(pdev->devfn);
9214 	pf->instance = pfs_found;
9215 
9216 	if (debug != -1) {
9217 		pf->msg_enable = pf->hw.debug_mask;
9218 		pf->msg_enable = debug;
9219 	}
9220 
9221 	/* do a special CORER for clearing PXE mode once at init */
9222 	if (hw->revision_id == 0 &&
9223 	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9224 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9225 		i40e_flush(hw);
9226 		msleep(200);
9227 		pf->corer_count++;
9228 
9229 		i40e_clear_pxe_mode(hw);
9230 	}
9231 
9232 	/* Reset here to make sure all is clean and to define PF 'n' */
9233 	i40e_clear_hw(hw);
9234 	err = i40e_pf_reset(hw);
9235 	if (err) {
9236 		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9237 		goto err_pf_reset;
9238 	}
9239 	pf->pfr_count++;
9240 
9241 	hw->aq.num_arq_entries = I40E_AQ_LEN;
9242 	hw->aq.num_asq_entries = I40E_AQ_LEN;
9243 	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9244 	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9245 	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9246 
9247 	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9248 		 "%s-%s:misc",
9249 		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9250 
9251 	err = i40e_init_shared_code(hw);
9252 	if (err) {
9253 		dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
9254 		goto err_pf_reset;
9255 	}
9256 
9257 	/* set up a default setting for link flow control */
9258 	pf->hw.fc.requested_mode = I40E_FC_NONE;
9259 
9260 	err = i40e_init_adminq(hw);
9261 	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
9262 	if (err) {
9263 		dev_info(&pdev->dev,
9264 			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
9265 		goto err_pf_reset;
9266 	}
9267 
9268 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
9269 	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
9270 		dev_info(&pdev->dev,
9271 			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
9272 	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
9273 		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
9274 		dev_info(&pdev->dev,
9275 			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
9276 
9277 
9278 	i40e_verify_eeprom(pf);
9279 
9280 	/* Rev 0 hardware was never productized */
9281 	if (hw->revision_id < 1)
9282 		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
9283 
9284 	i40e_clear_pxe_mode(hw);
9285 	err = i40e_get_capabilities(pf);
9286 	if (err)
9287 		goto err_adminq_setup;
9288 
9289 	err = i40e_sw_init(pf);
9290 	if (err) {
9291 		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
9292 		goto err_sw_init;
9293 	}
9294 
9295 	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9296 				hw->func_caps.num_rx_qp,
9297 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
9298 	if (err) {
9299 		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
9300 		goto err_init_lan_hmc;
9301 	}
9302 
9303 	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9304 	if (err) {
9305 		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
9306 		err = -ENOENT;
9307 		goto err_configure_lan_hmc;
9308 	}
9309 
9310 	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
9311 	 * Ignore error return codes because if it was already disabled via
9312 	 * hardware settings this will fail
9313 	 */
9314 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9315 	    (pf->hw.aq.fw_maj_ver < 4)) {
9316 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
9317 		i40e_aq_stop_lldp(hw, true, NULL);
9318 	}
9319 
9320 	i40e_get_mac_addr(hw, hw->mac.addr);
9321 	if (!is_valid_ether_addr(hw->mac.addr)) {
9322 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
9323 		err = -EIO;
9324 		goto err_mac_addr;
9325 	}
9326 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
9327 	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
9328 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
9329 	if (is_valid_ether_addr(hw->mac.port_addr))
9330 		pf->flags |= I40E_FLAG_PORT_ID_VALID;
9331 #ifdef I40E_FCOE
9332 	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
9333 	if (err)
9334 		dev_info(&pdev->dev,
9335 			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
9336 	if (!is_valid_ether_addr(hw->mac.san_addr)) {
9337 		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
9338 			 hw->mac.san_addr);
9339 		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
9340 	}
9341 	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
9342 #endif /* I40E_FCOE */
9343 
9344 	pci_set_drvdata(pdev, pf);
9345 	pci_save_state(pdev);
9346 #ifdef CONFIG_I40E_DCB
9347 	err = i40e_init_pf_dcb(pf);
9348 	if (err) {
9349 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
9350 		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9351 		/* Continue without DCB enabled */
9352 	}
9353 #endif /* CONFIG_I40E_DCB */
9354 
9355 	/* set up periodic task facility */
9356 	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
9357 	pf->service_timer_period = HZ;
9358 
9359 	INIT_WORK(&pf->service_task, i40e_service_task);
9360 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
9361 	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
9362 	pf->link_check_timeout = jiffies;
9363 
9364 	/* WoL defaults to disabled */
9365 	pf->wol_en = false;
9366 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
9367 
9368 	/* set up the main switch operations */
9369 	i40e_determine_queue_usage(pf);
9370 	i40e_init_interrupt_scheme(pf);
9371 
9372 	/* The number of VSIs reported by the FW is the minimum guaranteed
9373 	 * to us; HW supports far more and we share the remaining pool with
9374 	 * the other PFs. We allocate space for more than the guarantee with
9375 	 * the understanding that we might not get them all later.
9376 	 */
9377 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
9378 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
9379 	else
9380 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
9381 
9382 	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
9383 	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
9384 	pf->vsi = kzalloc(len, GFP_KERNEL);
9385 	if (!pf->vsi) {
9386 		err = -ENOMEM;
9387 		goto err_switch_setup;
9388 	}
9389 
9390 	err = i40e_setup_pf_switch(pf, false);
9391 	if (err) {
9392 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
9393 		goto err_vsis;
9394 	}
9395 	/* if FDIR VSI was set up, start it now */
9396 	for (i = 0; i < pf->num_alloc_vsi; i++) {
9397 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
9398 			i40e_vsi_open(pf->vsi[i]);
9399 			break;
9400 		}
9401 	}
9402 
9403 	/* driver is only interested in link up/down and module qualification
9404 	 * reports from firmware
9405 	 */
9406 	err = i40e_aq_set_phy_int_mask(&pf->hw,
9407 				       I40E_AQ_EVENT_LINK_UPDOWN |
9408 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
9409 	if (err)
9410 		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
9411 
9412 	msleep(75);
9413 	err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9414 	if (err) {
9415 		dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
9416 			 pf->hw.aq.asq_last_status);
9417 	}
9418 
9419 	/* The main driver is (mostly) up and happy. We need to set this state
9420 	 * before setting up the misc vector or we get a race and the vector
9421 	 * ends up disabled forever.
9422 	 */
9423 	clear_bit(__I40E_DOWN, &pf->state);
9424 
9425 	/* In case of MSIX we are going to setup the misc vector right here
9426 	 * to handle admin queue events etc. In case of legacy and MSI
9427 	 * the misc functionality and queue processing is combined in
9428 	 * the same vector and that gets setup at open.
9429 	 */
9430 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9431 		err = i40e_setup_misc_vector(pf);
9432 		if (err) {
9433 			dev_info(&pdev->dev,
9434 				 "setup of misc vector failed: %d\n", err);
9435 			goto err_vsis;
9436 		}
9437 	}
9438 
9439 #ifdef CONFIG_PCI_IOV
9440 	/* prep for VF support */
9441 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9442 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9443 	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
9444 		u32 val;
9445 
9446 		/* disable link interrupts for VFs */
9447 		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
9448 		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
9449 		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
9450 		i40e_flush(hw);
9451 
9452 		if (pci_num_vf(pdev)) {
9453 			dev_info(&pdev->dev,
9454 				 "Active VFs found, allocating resources.\n");
9455 			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
9456 			if (err)
9457 				dev_info(&pdev->dev,
9458 					 "Error %d allocating resources for existing VFs\n",
9459 					 err);
9460 		}
9461 	}
9462 #endif /* CONFIG_PCI_IOV */
9463 
9464 	pfs_found++;
9465 
9466 	i40e_dbg_pf_init(pf);
9467 
9468 	/* tell the firmware that we're starting */
9469 	i40e_send_version(pf);
9470 
9471 	/* since everything's happy, start the service_task timer */
9472 	mod_timer(&pf->service_timer,
9473 		  round_jiffies(jiffies + pf->service_timer_period));
9474 
9475 #ifdef I40E_FCOE
9476 	/* create FCoE interface */
9477 	i40e_fcoe_vsi_setup(pf);
9478 
9479 #endif
9480 	/* Get the negotiated link width and speed from PCI config space */
9481 	pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
9482 
9483 	i40e_set_pci_config_data(hw, link_status);
9484 
9485 	dev_info(&pdev->dev, "PCI-Express: %s %s\n",
9486 		(hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
9487 		 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
9488 		 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
9489 		 "Unknown"),
9490 		(hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
9491 		 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
9492 		 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
9493 		 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
9494 		 "Unknown"));
9495 
9496 	if (hw->bus.width < i40e_bus_width_pcie_x8 ||
9497 	    hw->bus.speed < i40e_bus_speed_8000) {
9498 		dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
9499 		dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
9500 	}
9501 
9502 	/* print a string summarizing features */
9503 	i40e_print_features(pf);
9504 
9505 	return 0;
9506 
9507 	/* Unwind what we've done if something failed in the setup */
9508 err_vsis:
9509 	set_bit(__I40E_DOWN, &pf->state);
9510 	i40e_clear_interrupt_scheme(pf);
9511 	kfree(pf->vsi);
9512 err_switch_setup:
9513 	i40e_reset_interrupt_capability(pf);
9514 	del_timer_sync(&pf->service_timer);
9515 err_mac_addr:
9516 err_configure_lan_hmc:
9517 	(void)i40e_shutdown_lan_hmc(hw);
9518 err_init_lan_hmc:
9519 	kfree(pf->qp_pile);
9520 	kfree(pf->irq_pile);
9521 err_sw_init:
9522 err_adminq_setup:
9523 	(void)i40e_shutdown_adminq(hw);
9524 err_pf_reset:
9525 	iounmap(hw->hw_addr);
9526 err_ioremap:
9527 	kfree(pf);
9528 err_pf_alloc:
9529 	pci_disable_pcie_error_reporting(pdev);
9530 	pci_release_selected_regions(pdev,
9531 				     pci_select_bars(pdev, IORESOURCE_MEM));
9532 err_pci_reg:
9533 err_dma:
9534 	pci_disable_device(pdev);
9535 	return err;
9536 }
9537 
9538 /**
9539  * i40e_remove - Device removal routine
9540  * @pdev: PCI device information struct
9541  *
9542  * i40e_remove is called by the PCI subsystem to alert the driver
9543  * that is should release a PCI device.  This could be caused by a
9544  * Hot-Plug event, or because the driver is going to be removed from
9545  * memory.
9546  **/
9547 static void i40e_remove(struct pci_dev *pdev)
9548 {
9549 	struct i40e_pf *pf = pci_get_drvdata(pdev);
9550 	i40e_status ret_code;
9551 	int i;
9552 
9553 	i40e_dbg_pf_exit(pf);
9554 
9555 	i40e_ptp_stop(pf);
9556 
9557 	/* no more scheduling of any task */
9558 	set_bit(__I40E_DOWN, &pf->state);
9559 	del_timer_sync(&pf->service_timer);
9560 	cancel_work_sync(&pf->service_task);
9561 	i40e_fdir_teardown(pf);
9562 
9563 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9564 		i40e_free_vfs(pf);
9565 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
9566 	}
9567 
9568 	i40e_fdir_teardown(pf);
9569 
9570 	/* If there is a switch structure or any orphans, remove them.
9571 	 * This will leave only the PF's VSI remaining.
9572 	 */
9573 	for (i = 0; i < I40E_MAX_VEB; i++) {
9574 		if (!pf->veb[i])
9575 			continue;
9576 
9577 		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
9578 		    pf->veb[i]->uplink_seid == 0)
9579 			i40e_switch_branch_release(pf->veb[i]);
9580 	}
9581 
9582 	/* Now we can shutdown the PF's VSI, just before we kill
9583 	 * adminq and hmc.
9584 	 */
9585 	if (pf->vsi[pf->lan_vsi])
9586 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9587 
9588 	/* shutdown and destroy the HMC */
9589 	if (pf->hw.hmc.hmc_obj) {
9590 		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
9591 		if (ret_code)
9592 			dev_warn(&pdev->dev,
9593 				 "Failed to destroy the HMC resources: %d\n",
9594 				 ret_code);
9595 	}
9596 
9597 	/* shutdown the adminq */
9598 	ret_code = i40e_shutdown_adminq(&pf->hw);
9599 	if (ret_code)
9600 		dev_warn(&pdev->dev,
9601 			 "Failed to destroy the Admin Queue resources: %d\n",
9602 			 ret_code);
9603 
9604 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
9605 	i40e_clear_interrupt_scheme(pf);
9606 	for (i = 0; i < pf->num_alloc_vsi; i++) {
9607 		if (pf->vsi[i]) {
9608 			i40e_vsi_clear_rings(pf->vsi[i]);
9609 			i40e_vsi_clear(pf->vsi[i]);
9610 			pf->vsi[i] = NULL;
9611 		}
9612 	}
9613 
9614 	for (i = 0; i < I40E_MAX_VEB; i++) {
9615 		kfree(pf->veb[i]);
9616 		pf->veb[i] = NULL;
9617 	}
9618 
9619 	kfree(pf->qp_pile);
9620 	kfree(pf->irq_pile);
9621 	kfree(pf->vsi);
9622 
9623 	iounmap(pf->hw.hw_addr);
9624 	kfree(pf);
9625 	pci_release_selected_regions(pdev,
9626 				     pci_select_bars(pdev, IORESOURCE_MEM));
9627 
9628 	pci_disable_pcie_error_reporting(pdev);
9629 	pci_disable_device(pdev);
9630 }
9631 
9632 /**
9633  * i40e_pci_error_detected - warning that something funky happened in PCI land
9634  * @pdev: PCI device information struct
9635  *
9636  * Called to warn that something happened and the error handling steps
9637  * are in progress.  Allows the driver to quiesce things, be ready for
9638  * remediation.
9639  **/
9640 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
9641 						enum pci_channel_state error)
9642 {
9643 	struct i40e_pf *pf = pci_get_drvdata(pdev);
9644 
9645 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
9646 
9647 	/* shutdown all operations */
9648 	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
9649 		rtnl_lock();
9650 		i40e_prep_for_reset(pf);
9651 		rtnl_unlock();
9652 	}
9653 
9654 	/* Request a slot reset */
9655 	return PCI_ERS_RESULT_NEED_RESET;
9656 }
9657 
9658 /**
9659  * i40e_pci_error_slot_reset - a PCI slot reset just happened
9660  * @pdev: PCI device information struct
9661  *
9662  * Called to find if the driver can work with the device now that
9663  * the pci slot has been reset.  If a basic connection seems good
9664  * (registers are readable and have sane content) then return a
9665  * happy little PCI_ERS_RESULT_xxx.
9666  **/
9667 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
9668 {
9669 	struct i40e_pf *pf = pci_get_drvdata(pdev);
9670 	pci_ers_result_t result;
9671 	int err;
9672 	u32 reg;
9673 
9674 	dev_info(&pdev->dev, "%s\n", __func__);
9675 	if (pci_enable_device_mem(pdev)) {
9676 		dev_info(&pdev->dev,
9677 			 "Cannot re-enable PCI device after reset.\n");
9678 		result = PCI_ERS_RESULT_DISCONNECT;
9679 	} else {
9680 		pci_set_master(pdev);
9681 		pci_restore_state(pdev);
9682 		pci_save_state(pdev);
9683 		pci_wake_from_d3(pdev, false);
9684 
9685 		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9686 		if (reg == 0)
9687 			result = PCI_ERS_RESULT_RECOVERED;
9688 		else
9689 			result = PCI_ERS_RESULT_DISCONNECT;
9690 	}
9691 
9692 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
9693 	if (err) {
9694 		dev_info(&pdev->dev,
9695 			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
9696 			 err);
9697 		/* non-fatal, continue */
9698 	}
9699 
9700 	return result;
9701 }
9702 
9703 /**
9704  * i40e_pci_error_resume - restart operations after PCI error recovery
9705  * @pdev: PCI device information struct
9706  *
9707  * Called to allow the driver to bring things back up after PCI error
9708  * and/or reset recovery has finished.
9709  **/
9710 static void i40e_pci_error_resume(struct pci_dev *pdev)
9711 {
9712 	struct i40e_pf *pf = pci_get_drvdata(pdev);
9713 
9714 	dev_info(&pdev->dev, "%s\n", __func__);
9715 	if (test_bit(__I40E_SUSPENDED, &pf->state))
9716 		return;
9717 
9718 	rtnl_lock();
9719 	i40e_handle_reset_warning(pf);
9720 	rtnl_lock();
9721 }
9722 
9723 /**
9724  * i40e_shutdown - PCI callback for shutting down
9725  * @pdev: PCI device information struct
9726  **/
9727 static void i40e_shutdown(struct pci_dev *pdev)
9728 {
9729 	struct i40e_pf *pf = pci_get_drvdata(pdev);
9730 	struct i40e_hw *hw = &pf->hw;
9731 
9732 	set_bit(__I40E_SUSPENDED, &pf->state);
9733 	set_bit(__I40E_DOWN, &pf->state);
9734 	rtnl_lock();
9735 	i40e_prep_for_reset(pf);
9736 	rtnl_unlock();
9737 
9738 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9739 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9740 
9741 	i40e_clear_interrupt_scheme(pf);
9742 
9743 	if (system_state == SYSTEM_POWER_OFF) {
9744 		pci_wake_from_d3(pdev, pf->wol_en);
9745 		pci_set_power_state(pdev, PCI_D3hot);
9746 	}
9747 }
9748 
9749 #ifdef CONFIG_PM
9750 /**
9751  * i40e_suspend - PCI callback for moving to D3
9752  * @pdev: PCI device information struct
9753  **/
9754 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
9755 {
9756 	struct i40e_pf *pf = pci_get_drvdata(pdev);
9757 	struct i40e_hw *hw = &pf->hw;
9758 
9759 	set_bit(__I40E_SUSPENDED, &pf->state);
9760 	set_bit(__I40E_DOWN, &pf->state);
9761 	del_timer_sync(&pf->service_timer);
9762 	cancel_work_sync(&pf->service_task);
9763 	rtnl_lock();
9764 	i40e_prep_for_reset(pf);
9765 	rtnl_unlock();
9766 
9767 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9768 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9769 
9770 	pci_wake_from_d3(pdev, pf->wol_en);
9771 	pci_set_power_state(pdev, PCI_D3hot);
9772 
9773 	return 0;
9774 }
9775 
9776 /**
9777  * i40e_resume - PCI callback for waking up from D3
9778  * @pdev: PCI device information struct
9779  **/
9780 static int i40e_resume(struct pci_dev *pdev)
9781 {
9782 	struct i40e_pf *pf = pci_get_drvdata(pdev);
9783 	u32 err;
9784 
9785 	pci_set_power_state(pdev, PCI_D0);
9786 	pci_restore_state(pdev);
9787 	/* pci_restore_state() clears dev->state_saves, so
9788 	 * call pci_save_state() again to restore it.
9789 	 */
9790 	pci_save_state(pdev);
9791 
9792 	err = pci_enable_device_mem(pdev);
9793 	if (err) {
9794 		dev_err(&pdev->dev,
9795 			"%s: Cannot enable PCI device from suspend\n",
9796 			__func__);
9797 		return err;
9798 	}
9799 	pci_set_master(pdev);
9800 
9801 	/* no wakeup events while running */
9802 	pci_wake_from_d3(pdev, false);
9803 
9804 	/* handling the reset will rebuild the device state */
9805 	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
9806 		clear_bit(__I40E_DOWN, &pf->state);
9807 		rtnl_lock();
9808 		i40e_reset_and_rebuild(pf, false);
9809 		rtnl_unlock();
9810 	}
9811 
9812 	return 0;
9813 }
9814 
9815 #endif
9816 static const struct pci_error_handlers i40e_err_handler = {
9817 	.error_detected = i40e_pci_error_detected,
9818 	.slot_reset = i40e_pci_error_slot_reset,
9819 	.resume = i40e_pci_error_resume,
9820 };
9821 
9822 static struct pci_driver i40e_driver = {
9823 	.name     = i40e_driver_name,
9824 	.id_table = i40e_pci_tbl,
9825 	.probe    = i40e_probe,
9826 	.remove   = i40e_remove,
9827 #ifdef CONFIG_PM
9828 	.suspend  = i40e_suspend,
9829 	.resume   = i40e_resume,
9830 #endif
9831 	.shutdown = i40e_shutdown,
9832 	.err_handler = &i40e_err_handler,
9833 	.sriov_configure = i40e_pci_sriov_configure,
9834 };
9835 
9836 /**
9837  * i40e_init_module - Driver registration routine
9838  *
9839  * i40e_init_module is the first routine called when the driver is
9840  * loaded. All it does is register with the PCI subsystem.
9841  **/
9842 static int __init i40e_init_module(void)
9843 {
9844 	pr_info("%s: %s - version %s\n", i40e_driver_name,
9845 		i40e_driver_string, i40e_driver_version_str);
9846 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
9847 	i40e_dbg_init();
9848 	return pci_register_driver(&i40e_driver);
9849 }
9850 module_init(i40e_init_module);
9851 
9852 /**
9853  * i40e_exit_module - Driver exit cleanup routine
9854  *
9855  * i40e_exit_module is called just before the driver is removed
9856  * from memory.
9857  **/
9858 static void __exit i40e_exit_module(void)
9859 {
9860 	pci_unregister_driver(&i40e_driver);
9861 	i40e_dbg_exit();
9862 }
9863 module_exit(i40e_exit_module);
9864