1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33 
34 const char i40e_driver_name[] = "i40e";
35 static const char i40e_driver_string[] =
36 			"Intel(R) Ethernet Connection XL710 Network Driver";
37 
38 #define DRV_KERN "-k"
39 
40 #define DRV_VERSION_MAJOR 1
41 #define DRV_VERSION_MINOR 3
42 #define DRV_VERSION_BUILD 9
43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 	     __stringify(DRV_VERSION_MINOR) "." \
45 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
46 const char i40e_driver_version_str[] = DRV_VERSION;
47 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48 
/* a few forward declarations */
50 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
52 static int i40e_add_vsi(struct i40e_vsi *vsi);
53 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60 
61 /* i40e_pci_tbl - PCI Device ID Table
62  *
63  * Last entry must be all 0s
64  *
65  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66  *   Class, Class Mask, private data (not used) }
67  */
68 static const struct pci_device_id i40e_pci_tbl[] = {
69 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
78 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
79 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
80 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
81 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
82 	/* required last entry */
83 	{0, }
84 };
85 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
86 
87 #define I40E_MAX_VF_COUNT 128
88 static int debug = -1;
89 module_param(debug, int, 0);
90 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
91 
92 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
93 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
94 MODULE_LICENSE("GPL");
95 MODULE_VERSION(DRV_VERSION);
96 
97 /**
98  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
99  * @hw:   pointer to the HW structure
100  * @mem:  ptr to mem struct to fill out
101  * @size: size of memory requested
102  * @alignment: what to align the allocation to
103  **/
104 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
105 			    u64 size, u32 alignment)
106 {
107 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
108 
109 	mem->size = ALIGN(size, alignment);
110 	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
111 				      &mem->pa, GFP_KERNEL);
112 	if (!mem->va)
113 		return -ENOMEM;
114 
115 	return 0;
116 }
117 
118 /**
119  * i40e_free_dma_mem_d - OS specific memory free for shared code
120  * @hw:   pointer to the HW structure
121  * @mem:  ptr to mem struct to free
122  **/
123 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
124 {
125 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
126 
127 	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
128 	mem->va = NULL;
129 	mem->pa = 0;
130 	mem->size = 0;
131 
132 	return 0;
133 }
134 
135 /**
136  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
137  * @hw:   pointer to the HW structure
138  * @mem:  ptr to mem struct to fill out
139  * @size: size of memory requested
140  **/
141 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
142 			     u32 size)
143 {
144 	mem->size = size;
145 	mem->va = kzalloc(size, GFP_KERNEL);
146 
147 	if (!mem->va)
148 		return -ENOMEM;
149 
150 	return 0;
151 }
152 
153 /**
154  * i40e_free_virt_mem_d - OS specific memory free for shared code
155  * @hw:   pointer to the HW structure
156  * @mem:  ptr to mem struct to free
157  **/
158 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
159 {
160 	/* it's ok to kfree a NULL pointer */
161 	kfree(mem->va);
162 	mem->va = NULL;
163 	mem->size = 0;
164 
165 	return 0;
166 }
167 
168 /**
169  * i40e_get_lump - find a lump of free generic resource
170  * @pf: board private structure
171  * @pile: the pile of resource to search
172  * @needed: the number of items needed
173  * @id: an owner id to stick on the items assigned
174  *
175  * Returns the base item index of the lump, or negative for error
176  *
177  * The search_hint trick and lack of advanced fit-finding only work
178  * because we're highly likely to have all the same size lump requests.
179  * Linear search time and any fragmentation should be minimal.
180  **/
181 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
182 			 u16 needed, u16 id)
183 {
184 	int ret = -ENOMEM;
185 	int i, j;
186 
187 	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
188 		dev_info(&pf->pdev->dev,
189 			 "param err: pile=%p needed=%d id=0x%04x\n",
190 			 pile, needed, id);
191 		return -EINVAL;
192 	}
193 
194 	/* start the linear search with an imperfect hint */
195 	i = pile->search_hint;
196 	while (i < pile->num_entries) {
197 		/* skip already allocated entries */
198 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
199 			i++;
200 			continue;
201 		}
202 
203 		/* do we have enough in this lump? */
204 		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
205 			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
206 				break;
207 		}
208 
209 		if (j == needed) {
210 			/* there was enough, so assign it to the requestor */
211 			for (j = 0; j < needed; j++)
212 				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
213 			ret = i;
214 			pile->search_hint = i + j;
215 			break;
216 		} else {
217 			/* not enough, so skip over it and continue looking */
218 			i += j;
219 		}
220 	}
221 
222 	return ret;
223 }
224 
225 /**
226  * i40e_put_lump - return a lump of generic resource
227  * @pile: the pile of resource to search
228  * @index: the base item index
229  * @id: the owner id of the items assigned
230  *
231  * Returns the count of items in the lump
232  **/
233 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
234 {
235 	int valid_id = (id | I40E_PILE_VALID_BIT);
236 	int count = 0;
237 	int i;
238 
239 	if (!pile || index >= pile->num_entries)
240 		return -EINVAL;
241 
242 	for (i = index;
243 	     i < pile->num_entries && pile->list[i] == valid_id;
244 	     i++) {
245 		pile->list[i] = 0;
246 		count++;
247 	}
248 
249 	if (count && index < pile->search_hint)
250 		pile->search_hint = index;
251 
252 	return count;
253 }
254 
255 /**
256  * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
259  **/
260 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
261 {
262 	int i;
263 
264 	for (i = 0; i < pf->num_alloc_vsi; i++)
265 		if (pf->vsi[i] && (pf->vsi[i]->id == id))
266 			return pf->vsi[i];
267 
268 	return NULL;
269 }
270 
271 /**
272  * i40e_service_event_schedule - Schedule the service task to wake up
273  * @pf: board private structure
274  *
275  * If not already scheduled, this puts the task into the work queue
276  **/
277 static void i40e_service_event_schedule(struct i40e_pf *pf)
278 {
279 	if (!test_bit(__I40E_DOWN, &pf->state) &&
280 	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
281 	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
282 		schedule_work(&pf->service_task);
283 }
284 
285 /**
286  * i40e_tx_timeout - Respond to a Tx Hang
287  * @netdev: network interface device structure
288  *
289  * If any port has noticed a Tx timeout, it is likely that the whole
290  * device is munged, not just the one netdev port, so go for the full
291  * reset.
292  **/
293 #ifdef I40E_FCOE
294 void i40e_tx_timeout(struct net_device *netdev)
295 #else
296 static void i40e_tx_timeout(struct net_device *netdev)
297 #endif
298 {
299 	struct i40e_netdev_priv *np = netdev_priv(netdev);
300 	struct i40e_vsi *vsi = np->vsi;
301 	struct i40e_pf *pf = vsi->back;
302 
303 	pf->tx_timeout_count++;
304 
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
306 		pf->tx_timeout_recovery_level = 1;
307 	pf->tx_timeout_last_recovery = jiffies;
308 	netdev_info(netdev, "tx_timeout recovery level %d\n",
309 		    pf->tx_timeout_recovery_level);
310 
311 	switch (pf->tx_timeout_recovery_level) {
312 	case 0:
313 		/* disable and re-enable queues for the VSI */
314 		if (in_interrupt()) {
315 			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
316 			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
317 		} else {
318 			i40e_vsi_reinit_locked(vsi);
319 		}
320 		break;
321 	case 1:
322 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
323 		break;
324 	case 2:
325 		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
326 		break;
327 	case 3:
328 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
329 		break;
330 	default:
331 		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
332 		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
333 		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
334 		break;
335 	}
336 	i40e_service_event_schedule(pf);
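	/* escalate the recovery level so a timeout within 20s tries a bigger reset */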
337 	pf->tx_timeout_recovery_level++;
338 }
339 
340 /**
 * i40e_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new tail index (next descriptor to be used)
344  **/
345 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
346 {
347 	rx_ring->next_to_use = val;
348 
349 	/* Force memory writes to complete before letting h/w
350 	 * know there are new descriptors to fetch.  (Only
351 	 * applicable for weak-ordered memory model archs,
352 	 * such as IA-64).
353 	 */
354 	wmb();
355 	writel(val, rx_ring->tail);
356 }
357 
358 /**
359  * i40e_get_vsi_stats_struct - Get System Network Statistics
360  * @vsi: the VSI we care about
361  *
362  * Returns the address of the device statistics structure.
363  * The statistics are actually updated from the service task.
364  **/
365 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
366 {
367 	return &vsi->net_stats;
368 }
369 
370 /**
371  * i40e_get_netdev_stats_struct - Get statistics for netdev interface
372  * @netdev: network interface device structure
373  *
374  * Returns the address of the device statistics structure.
375  * The statistics are actually updated from the service task.
376  **/
377 #ifdef I40E_FCOE
378 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
379 					     struct net_device *netdev,
380 					     struct rtnl_link_stats64 *stats)
381 #else
382 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
383 					     struct net_device *netdev,
384 					     struct rtnl_link_stats64 *stats)
385 #endif
386 {
387 	struct i40e_netdev_priv *np = netdev_priv(netdev);
388 	struct i40e_ring *tx_ring, *rx_ring;
389 	struct i40e_vsi *vsi = np->vsi;
390 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
391 	int i;
392 
393 	if (test_bit(__I40E_DOWN, &vsi->state))
394 		return stats;
395 
396 	if (!vsi->tx_rings)
397 		return stats;
398 
399 	rcu_read_lock();
400 	for (i = 0; i < vsi->num_queue_pairs; i++) {
401 		u64 bytes, packets;
402 		unsigned int start;
403 
404 		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
405 		if (!tx_ring)
406 			continue;
407 
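		/* read a consistent snapshot of the Tx ring counters */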
408 		do {
409 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
410 			packets = tx_ring->stats.packets;
411 			bytes   = tx_ring->stats.bytes;
412 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
413 
414 		stats->tx_packets += packets;
415 		stats->tx_bytes   += bytes;
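		/* the Rx ring is part of the same memory block as the Tx ring */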
416 		rx_ring = &tx_ring[1];
417 
418 		do {
419 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
420 			packets = rx_ring->stats.packets;
421 			bytes   = rx_ring->stats.bytes;
422 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
423 
424 		stats->rx_packets += packets;
425 		stats->rx_bytes   += bytes;
426 	}
427 	rcu_read_unlock();
428 
429 	/* following stats updated by i40e_watchdog_subtask() */
430 	stats->multicast	= vsi_stats->multicast;
431 	stats->tx_errors	= vsi_stats->tx_errors;
432 	stats->tx_dropped	= vsi_stats->tx_dropped;
433 	stats->rx_errors	= vsi_stats->rx_errors;
434 	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
435 	stats->rx_length_errors	= vsi_stats->rx_length_errors;
436 
437 	return stats;
438 }
439 
440 /**
441  * i40e_vsi_reset_stats - Resets all stats of the given vsi
442  * @vsi: the VSI to have its stats reset
443  **/
444 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
445 {
446 	struct rtnl_link_stats64 *ns;
447 	int i;
448 
449 	if (!vsi)
450 		return;
451 
452 	ns = i40e_get_vsi_stats_struct(vsi);
453 	memset(ns, 0, sizeof(*ns));
454 	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
455 	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
456 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
457 	if (vsi->rx_rings && vsi->rx_rings[0]) {
458 		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
465 			memset(&vsi->tx_rings[i]->tx_stats, 0,
466 			       sizeof(vsi->tx_rings[i]->tx_stats));
467 		}
468 	}
469 	vsi->stat_offsets_loaded = false;
470 }
471 
472 /**
473  * i40e_pf_reset_stats - Reset all of the stats for the given PF
474  * @pf: the PF to be reset
475  **/
476 void i40e_pf_reset_stats(struct i40e_pf *pf)
477 {
478 	int i;
479 
480 	memset(&pf->stats, 0, sizeof(pf->stats));
481 	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
482 	pf->stat_offsets_loaded = false;
483 
484 	for (i = 0; i < I40E_MAX_VEB; i++) {
485 		if (pf->veb[i]) {
486 			memset(&pf->veb[i]->stats, 0,
487 			       sizeof(pf->veb[i]->stats));
488 			memset(&pf->veb[i]->stats_offsets, 0,
489 			       sizeof(pf->veb[i]->stats_offsets));
490 			pf->veb[i]->stat_offsets_loaded = false;
491 		}
492 	}
493 }
494 
495 /**
496  * i40e_stat_update48 - read and update a 48 bit stat from the chip
497  * @hw: ptr to the hardware info
498  * @hireg: the high 32 bit reg to read
499  * @loreg: the low 32 bit reg to read
500  * @offset_loaded: has the initial offset been loaded yet
501  * @offset: ptr to current offset value
502  * @stat: ptr to the stat
503  *
504  * Since the device stats are not reset at PFReset, they likely will not
505  * be zeroed when the driver starts.  We'll save the first values read
506  * and use them as offsets to be subtracted from the raw values in order
507  * to report stats that count from zero.  In the process, we also manage
508  * the potential roll-over.
509  **/
510 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
511 			       bool offset_loaded, u64 *offset, u64 *stat)
512 {
513 	u64 new_data;
514 
515 	if (hw->device_id == I40E_DEV_ID_QEMU) {
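		/* assemble the 48-bit stat from a 32-bit low and a 16-bit high read */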
516 		new_data = rd32(hw, loreg);
517 		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
518 	} else {
519 		new_data = rd64(hw, loreg);
520 	}
521 	if (!offset_loaded)
522 		*offset = new_data;
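	/* handle roll-over of the 48-bit counter */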
523 	if (likely(new_data >= *offset))
524 		*stat = new_data - *offset;
525 	else
526 		*stat = (new_data + BIT_ULL(48)) - *offset;
527 	*stat &= 0xFFFFFFFFFFFFULL;
528 }
529 
530 /**
531  * i40e_stat_update32 - read and update a 32 bit stat from the chip
532  * @hw: ptr to the hardware info
533  * @reg: the hw reg to read
534  * @offset_loaded: has the initial offset been loaded yet
535  * @offset: ptr to current offset value
536  * @stat: ptr to the stat
537  **/
538 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
539 			       bool offset_loaded, u64 *offset, u64 *stat)
540 {
541 	u32 new_data;
542 
543 	new_data = rd32(hw, reg);
544 	if (!offset_loaded)
545 		*offset = new_data;
546 	if (likely(new_data >= *offset))
547 		*stat = (u32)(new_data - *offset);
548 	else
549 		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
550 }
551 
552 /**
553  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
554  * @vsi: the VSI to be updated
555  **/
556 void i40e_update_eth_stats(struct i40e_vsi *vsi)
557 {
558 	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
559 	struct i40e_pf *pf = vsi->back;
560 	struct i40e_hw *hw = &pf->hw;
561 	struct i40e_eth_stats *oes;
562 	struct i40e_eth_stats *es;     /* device's eth stats */
563 
564 	es = &vsi->eth_stats;
565 	oes = &vsi->eth_stats_offsets;
566 
567 	/* Gather up the stats that the hw collects */
568 	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
569 			   vsi->stat_offsets_loaded,
570 			   &oes->tx_errors, &es->tx_errors);
571 	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
572 			   vsi->stat_offsets_loaded,
573 			   &oes->rx_discards, &es->rx_discards);
574 	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
575 			   vsi->stat_offsets_loaded,
576 			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
580 
581 	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
582 			   I40E_GLV_GORCL(stat_idx),
583 			   vsi->stat_offsets_loaded,
584 			   &oes->rx_bytes, &es->rx_bytes);
585 	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
586 			   I40E_GLV_UPRCL(stat_idx),
587 			   vsi->stat_offsets_loaded,
588 			   &oes->rx_unicast, &es->rx_unicast);
589 	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
590 			   I40E_GLV_MPRCL(stat_idx),
591 			   vsi->stat_offsets_loaded,
592 			   &oes->rx_multicast, &es->rx_multicast);
593 	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
594 			   I40E_GLV_BPRCL(stat_idx),
595 			   vsi->stat_offsets_loaded,
596 			   &oes->rx_broadcast, &es->rx_broadcast);
597 
598 	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
599 			   I40E_GLV_GOTCL(stat_idx),
600 			   vsi->stat_offsets_loaded,
601 			   &oes->tx_bytes, &es->tx_bytes);
602 	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
603 			   I40E_GLV_UPTCL(stat_idx),
604 			   vsi->stat_offsets_loaded,
605 			   &oes->tx_unicast, &es->tx_unicast);
606 	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
607 			   I40E_GLV_MPTCL(stat_idx),
608 			   vsi->stat_offsets_loaded,
609 			   &oes->tx_multicast, &es->tx_multicast);
610 	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
611 			   I40E_GLV_BPTCL(stat_idx),
612 			   vsi->stat_offsets_loaded,
613 			   &oes->tx_broadcast, &es->tx_broadcast);
614 	vsi->stat_offsets_loaded = true;
615 }
616 
617 /**
618  * i40e_update_veb_stats - Update Switch component statistics
619  * @veb: the VEB being updated
620  **/
621 static void i40e_update_veb_stats(struct i40e_veb *veb)
622 {
623 	struct i40e_pf *pf = veb->pf;
624 	struct i40e_hw *hw = &pf->hw;
625 	struct i40e_eth_stats *oes;
626 	struct i40e_eth_stats *es;     /* device's eth stats */
627 	struct i40e_veb_tc_stats *veb_oes;
628 	struct i40e_veb_tc_stats *veb_es;
629 	int i, idx = 0;
630 
631 	idx = veb->stats_idx;
632 	es = &veb->stats;
633 	oes = &veb->stats_offsets;
634 	veb_es = &veb->tc_stats;
635 	veb_oes = &veb->tc_stats_offsets;
636 
637 	/* Gather up the stats that the hw collects */
638 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
639 			   veb->stat_offsets_loaded,
640 			   &oes->tx_discards, &es->tx_discards);
641 	if (hw->revision_id > 0)
642 		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
643 				   veb->stat_offsets_loaded,
644 				   &oes->rx_unknown_protocol,
645 				   &es->rx_unknown_protocol);
646 	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
647 			   veb->stat_offsets_loaded,
648 			   &oes->rx_bytes, &es->rx_bytes);
649 	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
650 			   veb->stat_offsets_loaded,
651 			   &oes->rx_unicast, &es->rx_unicast);
652 	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
653 			   veb->stat_offsets_loaded,
654 			   &oes->rx_multicast, &es->rx_multicast);
655 	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
656 			   veb->stat_offsets_loaded,
657 			   &oes->rx_broadcast, &es->rx_broadcast);
658 
659 	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
660 			   veb->stat_offsets_loaded,
661 			   &oes->tx_bytes, &es->tx_bytes);
662 	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
663 			   veb->stat_offsets_loaded,
664 			   &oes->tx_unicast, &es->tx_unicast);
665 	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
666 			   veb->stat_offsets_loaded,
667 			   &oes->tx_multicast, &es->tx_multicast);
668 	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
669 			   veb->stat_offsets_loaded,
670 			   &oes->tx_broadcast, &es->tx_broadcast);
671 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
672 		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
673 				   I40E_GLVEBTC_RPCL(i, idx),
674 				   veb->stat_offsets_loaded,
675 				   &veb_oes->tc_rx_packets[i],
676 				   &veb_es->tc_rx_packets[i]);
677 		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
678 				   I40E_GLVEBTC_RBCL(i, idx),
679 				   veb->stat_offsets_loaded,
680 				   &veb_oes->tc_rx_bytes[i],
681 				   &veb_es->tc_rx_bytes[i]);
682 		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
683 				   I40E_GLVEBTC_TPCL(i, idx),
684 				   veb->stat_offsets_loaded,
685 				   &veb_oes->tc_tx_packets[i],
686 				   &veb_es->tc_tx_packets[i]);
687 		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
688 				   I40E_GLVEBTC_TBCL(i, idx),
689 				   veb->stat_offsets_loaded,
690 				   &veb_oes->tc_tx_bytes[i],
691 				   &veb_es->tc_tx_bytes[i]);
692 	}
693 	veb->stat_offsets_loaded = true;
694 }
695 
696 #ifdef I40E_FCOE
697 /**
698  * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
699  * @vsi: the VSI that is capable of doing FCoE
700  **/
701 static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
702 {
703 	struct i40e_pf *pf = vsi->back;
704 	struct i40e_hw *hw = &pf->hw;
705 	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
707 	int idx;
708 
709 	if (vsi->type != I40E_VSI_FCOE)
710 		return;
711 
712 	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
713 	fs = &vsi->fcoe_stats;
714 	ofs = &vsi->fcoe_stats_offsets;
715 
716 	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
717 			   vsi->fcoe_stat_offsets_loaded,
718 			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
719 	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
720 			   vsi->fcoe_stat_offsets_loaded,
721 			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
722 	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
723 			   vsi->fcoe_stat_offsets_loaded,
724 			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
725 	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
726 			   vsi->fcoe_stat_offsets_loaded,
727 			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
728 	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
729 			   vsi->fcoe_stat_offsets_loaded,
730 			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
731 	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
732 			   vsi->fcoe_stat_offsets_loaded,
733 			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
734 	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
735 			   vsi->fcoe_stat_offsets_loaded,
736 			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
737 	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
738 			   vsi->fcoe_stat_offsets_loaded,
739 			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
740 
741 	vsi->fcoe_stat_offsets_loaded = true;
742 }
743 
744 #endif
745 /**
746  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
747  * @pf: the corresponding PF
748  *
749  * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
750  **/
751 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
752 {
753 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
754 	struct i40e_hw_port_stats *nsd = &pf->stats;
755 	struct i40e_hw *hw = &pf->hw;
756 	u64 xoff = 0;
757 	u16 i, v;
758 
759 	if ((hw->fc.current_mode != I40E_FC_FULL) &&
760 	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
761 		return;
762 
763 	xoff = nsd->link_xoff_rx;
764 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
765 			   pf->stat_offsets_loaded,
766 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
767 
768 	/* No new LFC xoff rx */
769 	if (!(nsd->link_xoff_rx - xoff))
770 		return;
771 
772 	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
773 	for (v = 0; v < pf->num_alloc_vsi; v++) {
774 		struct i40e_vsi *vsi = pf->vsi[v];
775 
776 		if (!vsi || !vsi->tx_rings[0])
777 			continue;
778 
779 		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
782 		}
783 	}
784 }
785 
786 /**
787  * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
788  * @pf: the corresponding PF
789  *
790  * Update the Rx XOFF counter (PAUSE frames) in PFC mode
791  **/
792 static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
793 {
794 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
795 	struct i40e_hw_port_stats *nsd = &pf->stats;
796 	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
797 	struct i40e_dcbx_config *dcb_cfg;
798 	struct i40e_hw *hw = &pf->hw;
799 	u16 i, v;
800 	u8 tc;
801 
802 	dcb_cfg = &hw->local_dcbx_config;
803 
804 	/* Collect Link XOFF stats when PFC is disabled */
805 	if (!dcb_cfg->pfc.pfcenable) {
806 		i40e_update_link_xoff_rx(pf);
807 		return;
808 	}
809 
810 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];

		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
813 				   pf->stat_offsets_loaded,
814 				   &osd->priority_xoff_rx[i],
815 				   &nsd->priority_xoff_rx[i]);
816 
817 		/* No new PFC xoff rx */
818 		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
819 			continue;
820 		/* Get the TC for given priority */
821 		tc = dcb_cfg->etscfg.prioritytable[i];
822 		xoff[tc] = true;
823 	}
824 
825 	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
826 	for (v = 0; v < pf->num_alloc_vsi; v++) {
827 		struct i40e_vsi *vsi = pf->vsi[v];
828 
829 		if (!vsi || !vsi->tx_rings[0])
830 			continue;
831 
832 		for (i = 0; i < vsi->num_queue_pairs; i++) {
833 			struct i40e_ring *ring = vsi->tx_rings[i];
834 
835 			tc = ring->dcb_tc;
836 			if (xoff[tc])
837 				clear_bit(__I40E_HANG_CHECK_ARMED,
838 					  &ring->state);
839 		}
840 	}
841 }
842 
843 /**
844  * i40e_update_vsi_stats - Update the vsi statistics counters.
845  * @vsi: the VSI to be updated
846  *
847  * There are a few instances where we store the same stat in a
848  * couple of different structs.  This is partly because we have
849  * the netdev stats that need to be filled out, which is slightly
850  * different from the "eth_stats" defined by the chip and used in
851  * VF communications.  We sort it out here.
852  **/
853 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
854 {
855 	struct i40e_pf *pf = vsi->back;
856 	struct rtnl_link_stats64 *ons;
857 	struct rtnl_link_stats64 *ns;   /* netdev stats */
858 	struct i40e_eth_stats *oes;
859 	struct i40e_eth_stats *es;     /* device's eth stats */
860 	u32 tx_restart, tx_busy;
861 	struct i40e_ring *p;
862 	u32 rx_page, rx_buf;
863 	u64 bytes, packets;
864 	unsigned int start;
865 	u64 rx_p, rx_b;
866 	u64 tx_p, tx_b;
867 	u16 q;
868 
869 	if (test_bit(__I40E_DOWN, &vsi->state) ||
870 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
871 		return;
872 
873 	ns = i40e_get_vsi_stats_struct(vsi);
874 	ons = &vsi->net_stats_offsets;
875 	es = &vsi->eth_stats;
876 	oes = &vsi->eth_stats_offsets;
877 
878 	/* Gather up the netdev and vsi stats that the driver collects
879 	 * on the fly during packet processing
880 	 */
881 	rx_b = rx_p = 0;
882 	tx_b = tx_p = 0;
883 	tx_restart = tx_busy = 0;
884 	rx_page = 0;
885 	rx_buf = 0;
886 	rcu_read_lock();
887 	for (q = 0; q < vsi->num_queue_pairs; q++) {
888 		/* locate Tx ring */
889 		p = ACCESS_ONCE(vsi->tx_rings[q]);
890 
891 		do {
892 			start = u64_stats_fetch_begin_irq(&p->syncp);
893 			packets = p->stats.packets;
894 			bytes = p->stats.bytes;
895 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
896 		tx_b += bytes;
897 		tx_p += packets;
898 		tx_restart += p->tx_stats.restart_queue;
899 		tx_busy += p->tx_stats.tx_busy;
900 
901 		/* Rx queue is part of the same block as Tx queue */
902 		p = &p[1];
903 		do {
904 			start = u64_stats_fetch_begin_irq(&p->syncp);
905 			packets = p->stats.packets;
906 			bytes = p->stats.bytes;
907 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
908 		rx_b += bytes;
909 		rx_p += packets;
910 		rx_buf += p->rx_stats.alloc_buff_failed;
911 		rx_page += p->rx_stats.alloc_page_failed;
912 	}
913 	rcu_read_unlock();
914 	vsi->tx_restart = tx_restart;
915 	vsi->tx_busy = tx_busy;
916 	vsi->rx_page_failed = rx_page;
917 	vsi->rx_buf_failed = rx_buf;
918 
919 	ns->rx_packets = rx_p;
920 	ns->rx_bytes = rx_b;
921 	ns->tx_packets = tx_p;
922 	ns->tx_bytes = tx_b;
923 
924 	/* update netdev stats from eth stats */
925 	i40e_update_eth_stats(vsi);
926 	ons->tx_errors = oes->tx_errors;
927 	ns->tx_errors = es->tx_errors;
928 	ons->multicast = oes->rx_multicast;
929 	ns->multicast = es->rx_multicast;
930 	ons->rx_dropped = oes->rx_discards;
931 	ns->rx_dropped = es->rx_discards;
932 	ons->tx_dropped = oes->tx_discards;
933 	ns->tx_dropped = es->tx_discards;
934 
935 	/* pull in a couple PF stats if this is the main vsi */
936 	if (vsi == pf->vsi[pf->lan_vsi]) {
937 		ns->rx_crc_errors = pf->stats.crc_errors;
938 		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
939 		ns->rx_length_errors = pf->stats.rx_length_errors;
940 	}
941 }
942 
943 /**
944  * i40e_update_pf_stats - Update the PF statistics counters.
945  * @pf: the PF to be updated
946  **/
947 static void i40e_update_pf_stats(struct i40e_pf *pf)
948 {
949 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
950 	struct i40e_hw_port_stats *nsd = &pf->stats;
951 	struct i40e_hw *hw = &pf->hw;
952 	u32 val;
953 	int i;
954 
955 	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
956 			   I40E_GLPRT_GORCL(hw->port),
957 			   pf->stat_offsets_loaded,
958 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
959 	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
960 			   I40E_GLPRT_GOTCL(hw->port),
961 			   pf->stat_offsets_loaded,
962 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
963 	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
964 			   pf->stat_offsets_loaded,
965 			   &osd->eth.rx_discards,
966 			   &nsd->eth.rx_discards);
967 	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
968 			   I40E_GLPRT_UPRCL(hw->port),
969 			   pf->stat_offsets_loaded,
970 			   &osd->eth.rx_unicast,
971 			   &nsd->eth.rx_unicast);
972 	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
973 			   I40E_GLPRT_MPRCL(hw->port),
974 			   pf->stat_offsets_loaded,
975 			   &osd->eth.rx_multicast,
976 			   &nsd->eth.rx_multicast);
977 	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
978 			   I40E_GLPRT_BPRCL(hw->port),
979 			   pf->stat_offsets_loaded,
980 			   &osd->eth.rx_broadcast,
981 			   &nsd->eth.rx_broadcast);
982 	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
983 			   I40E_GLPRT_UPTCL(hw->port),
984 			   pf->stat_offsets_loaded,
985 			   &osd->eth.tx_unicast,
986 			   &nsd->eth.tx_unicast);
987 	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
988 			   I40E_GLPRT_MPTCL(hw->port),
989 			   pf->stat_offsets_loaded,
990 			   &osd->eth.tx_multicast,
991 			   &nsd->eth.tx_multicast);
992 	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
993 			   I40E_GLPRT_BPTCL(hw->port),
994 			   pf->stat_offsets_loaded,
995 			   &osd->eth.tx_broadcast,
996 			   &nsd->eth.tx_broadcast);
997 
998 	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
999 			   pf->stat_offsets_loaded,
1000 			   &osd->tx_dropped_link_down,
1001 			   &nsd->tx_dropped_link_down);
1002 
1003 	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1004 			   pf->stat_offsets_loaded,
1005 			   &osd->crc_errors, &nsd->crc_errors);
1006 
1007 	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1008 			   pf->stat_offsets_loaded,
1009 			   &osd->illegal_bytes, &nsd->illegal_bytes);
1010 
1011 	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
1012 			   pf->stat_offsets_loaded,
1013 			   &osd->mac_local_faults,
1014 			   &nsd->mac_local_faults);
1015 	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
1016 			   pf->stat_offsets_loaded,
1017 			   &osd->mac_remote_faults,
1018 			   &nsd->mac_remote_faults);
1019 
1020 	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
1021 			   pf->stat_offsets_loaded,
1022 			   &osd->rx_length_errors,
1023 			   &nsd->rx_length_errors);
1024 
1025 	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
1026 			   pf->stat_offsets_loaded,
1027 			   &osd->link_xon_rx, &nsd->link_xon_rx);
1028 	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
1029 			   pf->stat_offsets_loaded,
1030 			   &osd->link_xon_tx, &nsd->link_xon_tx);
1031 	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
1032 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1033 			   pf->stat_offsets_loaded,
1034 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
1035 
1036 	for (i = 0; i < 8; i++) {
1037 		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1038 				   pf->stat_offsets_loaded,
1039 				   &osd->priority_xon_rx[i],
1040 				   &nsd->priority_xon_rx[i]);
1041 		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1042 				   pf->stat_offsets_loaded,
1043 				   &osd->priority_xon_tx[i],
1044 				   &nsd->priority_xon_tx[i]);
1045 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1046 				   pf->stat_offsets_loaded,
1047 				   &osd->priority_xoff_tx[i],
1048 				   &nsd->priority_xoff_tx[i]);
1049 		i40e_stat_update32(hw,
1050 				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1051 				   pf->stat_offsets_loaded,
1052 				   &osd->priority_xon_2_xoff[i],
1053 				   &nsd->priority_xon_2_xoff[i]);
1054 	}
1055 
1056 	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1057 			   I40E_GLPRT_PRC64L(hw->port),
1058 			   pf->stat_offsets_loaded,
1059 			   &osd->rx_size_64, &nsd->rx_size_64);
1060 	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1061 			   I40E_GLPRT_PRC127L(hw->port),
1062 			   pf->stat_offsets_loaded,
1063 			   &osd->rx_size_127, &nsd->rx_size_127);
1064 	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1065 			   I40E_GLPRT_PRC255L(hw->port),
1066 			   pf->stat_offsets_loaded,
1067 			   &osd->rx_size_255, &nsd->rx_size_255);
1068 	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1069 			   I40E_GLPRT_PRC511L(hw->port),
1070 			   pf->stat_offsets_loaded,
1071 			   &osd->rx_size_511, &nsd->rx_size_511);
1072 	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1073 			   I40E_GLPRT_PRC1023L(hw->port),
1074 			   pf->stat_offsets_loaded,
1075 			   &osd->rx_size_1023, &nsd->rx_size_1023);
1076 	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1077 			   I40E_GLPRT_PRC1522L(hw->port),
1078 			   pf->stat_offsets_loaded,
1079 			   &osd->rx_size_1522, &nsd->rx_size_1522);
1080 	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1081 			   I40E_GLPRT_PRC9522L(hw->port),
1082 			   pf->stat_offsets_loaded,
1083 			   &osd->rx_size_big, &nsd->rx_size_big);
1084 
1085 	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1086 			   I40E_GLPRT_PTC64L(hw->port),
1087 			   pf->stat_offsets_loaded,
1088 			   &osd->tx_size_64, &nsd->tx_size_64);
1089 	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1090 			   I40E_GLPRT_PTC127L(hw->port),
1091 			   pf->stat_offsets_loaded,
1092 			   &osd->tx_size_127, &nsd->tx_size_127);
1093 	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1094 			   I40E_GLPRT_PTC255L(hw->port),
1095 			   pf->stat_offsets_loaded,
1096 			   &osd->tx_size_255, &nsd->tx_size_255);
1097 	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1098 			   I40E_GLPRT_PTC511L(hw->port),
1099 			   pf->stat_offsets_loaded,
1100 			   &osd->tx_size_511, &nsd->tx_size_511);
1101 	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1102 			   I40E_GLPRT_PTC1023L(hw->port),
1103 			   pf->stat_offsets_loaded,
1104 			   &osd->tx_size_1023, &nsd->tx_size_1023);
1105 	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1106 			   I40E_GLPRT_PTC1522L(hw->port),
1107 			   pf->stat_offsets_loaded,
1108 			   &osd->tx_size_1522, &nsd->tx_size_1522);
1109 	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1110 			   I40E_GLPRT_PTC9522L(hw->port),
1111 			   pf->stat_offsets_loaded,
1112 			   &osd->tx_size_big, &nsd->tx_size_big);
1113 
1114 	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1115 			   pf->stat_offsets_loaded,
1116 			   &osd->rx_undersize, &nsd->rx_undersize);
1117 	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1118 			   pf->stat_offsets_loaded,
1119 			   &osd->rx_fragments, &nsd->rx_fragments);
1120 	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1121 			   pf->stat_offsets_loaded,
1122 			   &osd->rx_oversize, &nsd->rx_oversize);
1123 	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1124 			   pf->stat_offsets_loaded,
1125 			   &osd->rx_jabber, &nsd->rx_jabber);
1126 
1127 	/* FDIR stats */
1128 	i40e_stat_update32(hw,
1129 			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
1130 			   pf->stat_offsets_loaded,
1131 			   &osd->fd_atr_match, &nsd->fd_atr_match);
1132 	i40e_stat_update32(hw,
1133 			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
1134 			   pf->stat_offsets_loaded,
1135 			   &osd->fd_sb_match, &nsd->fd_sb_match);
1136 	i40e_stat_update32(hw,
1137 		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1138 		      pf->stat_offsets_loaded,
1139 		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
1140 
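	/* EEE (Energy Efficient Ethernet) LPI status and counters */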
1141 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
1142 	nsd->tx_lpi_status =
1143 		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1144 			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1145 	nsd->rx_lpi_status =
1146 		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1147 			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1148 	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1149 			   pf->stat_offsets_loaded,
1150 			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
1151 	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1152 			   pf->stat_offsets_loaded,
1153 			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
1154 
1155 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1156 	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1157 		nsd->fd_sb_status = true;
1158 	else
1159 		nsd->fd_sb_status = false;
1160 
1161 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1162 	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1163 		nsd->fd_atr_status = true;
1164 	else
1165 		nsd->fd_atr_status = false;
1166 
1167 	pf->stat_offsets_loaded = true;
1168 }
1169 
1170 /**
1171  * i40e_update_stats - Update the various statistics counters.
1172  * @vsi: the VSI to be updated
1173  *
1174  * Update the various stats for this VSI and its related entities.
1175  **/
1176 void i40e_update_stats(struct i40e_vsi *vsi)
1177 {
1178 	struct i40e_pf *pf = vsi->back;
1179 
1180 	if (vsi == pf->vsi[pf->lan_vsi])
1181 		i40e_update_pf_stats(pf);
1182 
1183 	i40e_update_vsi_stats(vsi);
1184 #ifdef I40E_FCOE
1185 	i40e_update_fcoe_stats(vsi);
1186 #endif
1187 }
1188 
1189 /**
1190  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1191  * @vsi: the VSI to be searched
1192  * @macaddr: the MAC address
1193  * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1196  *
1197  * Returns ptr to the filter object or NULL
1198  **/
1199 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1200 						u8 *macaddr, s16 vlan,
1201 						bool is_vf, bool is_netdev)
1202 {
1203 	struct i40e_mac_filter *f;
1204 
1205 	if (!vsi || !macaddr)
1206 		return NULL;
1207 
1208 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1209 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1210 		    (vlan == f->vlan)    &&
1211 		    (!is_vf || f->is_vf) &&
1212 		    (!is_netdev || f->is_netdev))
1213 			return f;
1214 	}
1215 	return NULL;
1216 }
1217 
1218 /**
1219  * i40e_find_mac - Find a mac addr in the macvlan filters list
1220  * @vsi: the VSI to be searched
1221  * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1224  *
1225  * Returns the first filter with the provided MAC address or NULL if
1226  * MAC address was not found
1227  **/
1228 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1229 				      bool is_vf, bool is_netdev)
1230 {
1231 	struct i40e_mac_filter *f;
1232 
1233 	if (!vsi || !macaddr)
1234 		return NULL;
1235 
1236 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1237 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1238 		    (!is_vf || f->is_vf) &&
1239 		    (!is_netdev || f->is_netdev))
1240 			return f;
1241 	}
1242 	return NULL;
1243 }
1244 
1245 /**
1246  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1247  * @vsi: the VSI to be searched
1248  *
1249  * Returns true if VSI is in vlan mode or false otherwise
1250  **/
1251 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1252 {
1253 	struct i40e_mac_filter *f;
1254 
1255 	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through the whole list to be sure
1257 	 */
1258 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1259 		if (f->vlan >= 0)
1260 			return true;
1261 	}
1262 
1263 	return false;
1264 }
1265 
1266 /**
1267  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1268  * @vsi: the VSI to be searched
1269  * @macaddr: the mac address to be filtered
1270  * @is_vf: true if it is a VF
1271  * @is_netdev: true if it is a netdev
1272  *
1273  * Goes through all the macvlan filters and adds a
1274  * macvlan filter for each unique vlan that already exists
1275  *
1276  * Returns first filter found on success, else NULL
1277  **/
1278 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1279 					     bool is_vf, bool is_netdev)
1280 {
1281 	struct i40e_mac_filter *f;
1282 
1283 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
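		/* when a port VLAN is configured, force the filter onto it */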
1284 		if (vsi->info.pvid)
1285 			f->vlan = le16_to_cpu(vsi->info.pvid);
1286 		if (!i40e_find_filter(vsi, macaddr, f->vlan,
1287 				      is_vf, is_netdev)) {
1288 			if (!i40e_add_filter(vsi, macaddr, f->vlan,
1289 					     is_vf, is_netdev))
1290 				return NULL;
1291 		}
1292 	}
1293 
1294 	return list_first_entry_or_null(&vsi->mac_filter_list,
1295 					struct i40e_mac_filter, list);
1296 }
1297 
1298 /**
1299  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1300  * @vsi: the PF Main VSI - inappropriate for any other VSI
1301  * @macaddr: the MAC address
1302  *
1303  * Some older firmware configurations set up a default promiscuous VLAN
1304  * filter that needs to be removed.
1305  **/
1306 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1307 {
1308 	struct i40e_aqc_remove_macvlan_element_data element;
1309 	struct i40e_pf *pf = vsi->back;
1310 	i40e_status ret;
1311 
1312 	/* Only appropriate for the PF main VSI */
1313 	if (vsi->type != I40E_VSI_MAIN)
1314 		return -EINVAL;
1315 
1316 	memset(&element, 0, sizeof(element));
1317 	ether_addr_copy(element.mac_addr, macaddr);
1318 	element.vlan_tag = 0;
1319 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1320 			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1321 	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1322 	if (ret)
1323 		return -ENOENT;
1324 
1325 	return 0;
1326 }
1327 
1328 /**
1329  * i40e_add_filter - Add a mac/vlan filter to the VSI
1330  * @vsi: the VSI to be searched
1331  * @macaddr: the MAC address
1332  * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1335  *
1336  * Returns ptr to the filter object or NULL when no memory available.
1337  **/
1338 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1339 					u8 *macaddr, s16 vlan,
1340 					bool is_vf, bool is_netdev)
1341 {
1342 	struct i40e_mac_filter *f;
1343 
1344 	if (!vsi || !macaddr)
1345 		return NULL;
1346 
1347 	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1348 	if (!f) {
1349 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
1350 		if (!f)
1351 			goto add_filter_out;
1352 
1353 		ether_addr_copy(f->macaddr, macaddr);
1354 		f->vlan = vlan;
1355 		f->changed = true;
1356 
1357 		INIT_LIST_HEAD(&f->list);
1358 		list_add(&f->list, &vsi->mac_filter_list);
1359 	}
1360 
1361 	/* increment counter and add a new flag if needed */
1362 	if (is_vf) {
1363 		if (!f->is_vf) {
1364 			f->is_vf = true;
1365 			f->counter++;
1366 		}
1367 	} else if (is_netdev) {
1368 		if (!f->is_netdev) {
1369 			f->is_netdev = true;
1370 			f->counter++;
1371 		}
1372 	} else {
1373 		f->counter++;
1374 	}
1375 
1376 	/* changed tells sync_filters_subtask to
1377 	 * push the filter down to the firmware
1378 	 */
1379 	if (f->changed) {
1380 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1381 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1382 	}
1383 
1384 add_filter_out:
1385 	return f;
1386 }
1387 
1388 /**
1389  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1390  * @vsi: the VSI to be searched
1391  * @macaddr: the MAC address
1392  * @vlan: the vlan
1393  * @is_vf: make sure it's a VF filter, else doesn't matter
1394  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1395  **/
1396 void i40e_del_filter(struct i40e_vsi *vsi,
1397 		     u8 *macaddr, s16 vlan,
1398 		     bool is_vf, bool is_netdev)
1399 {
1400 	struct i40e_mac_filter *f;
1401 
1402 	if (!vsi || !macaddr)
1403 		return;
1404 
1405 	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1406 	if (!f || f->counter == 0)
1407 		return;
1408 
1409 	if (is_vf) {
1410 		if (f->is_vf) {
1411 			f->is_vf = false;
1412 			f->counter--;
1413 		}
1414 	} else if (is_netdev) {
1415 		if (f->is_netdev) {
1416 			f->is_netdev = false;
1417 			f->counter--;
1418 		}
1419 	} else {
1420 		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);
1424 
1425 		if (f->counter > min_f)
1426 			f->counter--;
1427 	}
1428 
1429 	/* counter == 0 tells sync_filters_subtask to
1430 	 * remove the filter from the firmware's list
1431 	 */
1432 	if (f->counter == 0) {
1433 		f->changed = true;
1434 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1435 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1436 	}
1437 }
1438 
1439 /**
1440  * i40e_set_mac - NDO callback to set mac address
1441  * @netdev: network interface device structure
1442  * @p: pointer to an address structure
1443  *
1444  * Returns 0 on success, negative on failure
1445  **/
1446 #ifdef I40E_FCOE
1447 int i40e_set_mac(struct net_device *netdev, void *p)
1448 #else
1449 static int i40e_set_mac(struct net_device *netdev, void *p)
1450 #endif
1451 {
1452 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1453 	struct i40e_vsi *vsi = np->vsi;
1454 	struct i40e_pf *pf = vsi->back;
1455 	struct i40e_hw *hw = &pf->hw;
1456 	struct sockaddr *addr = p;
1457 	struct i40e_mac_filter *f;
1458 
1459 	if (!is_valid_ether_addr(addr->sa_data))
1460 		return -EADDRNOTAVAIL;
1461 
1462 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1463 		netdev_info(netdev, "already using mac address %pM\n",
1464 			    addr->sa_data);
1465 		return 0;
1466 	}
1467 
1468 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1469 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1470 		return -EADDRNOTAVAIL;
1471 
1472 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1473 		netdev_info(netdev, "returning to hw mac address %pM\n",
1474 			    hw->mac.addr);
1475 	else
1476 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1477 
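	/* for the main VSI, also write the new address to firmware */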
1478 	if (vsi->type == I40E_VSI_MAIN) {
1479 		i40e_status ret;
1480 		ret = i40e_aq_mac_address_write(&vsi->back->hw,
1481 						I40E_AQC_WRITE_TYPE_LAA_WOL,
1482 						addr->sa_data, NULL);
1483 		if (ret) {
1484 			netdev_info(netdev,
1485 				    "Addr change for Main VSI failed: %d\n",
1486 				    ret);
1487 			return -EADDRNOTAVAIL;
1488 		}
1489 	}
1490 
1491 	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1492 		struct i40e_aqc_remove_macvlan_element_data element;
1493 
1494 		memset(&element, 0, sizeof(element));
1495 		ether_addr_copy(element.mac_addr, netdev->dev_addr);
1496 		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1497 		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1498 	} else {
1499 		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1500 				false, false);
1501 	}
1502 
1503 	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1504 		struct i40e_aqc_add_macvlan_element_data element;
1505 
1506 		memset(&element, 0, sizeof(element));
1507 		ether_addr_copy(element.mac_addr, hw->mac.addr);
1508 		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1509 		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1510 	} else {
1511 		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1512 				    false, false);
1513 		if (f)
1514 			f->is_laa = true;
1515 	}
1516 
1517 	i40e_sync_vsi_filters(vsi);
1518 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
1519 
1520 	return 0;
1521 }
1522 
1523 /**
1524  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1525  * @vsi: the VSI being setup
1526  * @ctxt: VSI context structure
1527  * @enabled_tc: Enabled TCs bitmap
1528  * @is_add: True if called before Add VSI
1529  *
1530  * Setup VSI queue mapping for enabled traffic classes.
1531  **/
1532 #ifdef I40E_FCOE
1533 void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1534 			      struct i40e_vsi_context *ctxt,
1535 			      u8 enabled_tc,
1536 			      bool is_add)
1537 #else
1538 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1539 				     struct i40e_vsi_context *ctxt,
1540 				     u8 enabled_tc,
1541 				     bool is_add)
1542 #endif
1543 {
1544 	struct i40e_pf *pf = vsi->back;
1545 	u16 sections = 0;
1546 	u8 netdev_tc = 0;
1547 	u16 numtc = 0;
1548 	u16 qcount;
1549 	u8 offset;
1550 	u16 qmap;
1551 	int i;
1552 	u16 num_tc_qps = 0;
1553 
1554 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1555 	offset = 0;
1556 
1557 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1558 		/* Find numtc from enabled TC bitmap */
1559 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1560 			if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
1561 				numtc++;
1562 		}
1563 		if (!numtc) {
1564 			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1565 			numtc = 1;
1566 		}
1567 	} else {
		/* At least TC0 is enabled in the non-DCB case */
1569 		numtc = 1;
1570 	}
1571 
1572 	vsi->tc_config.numtc = numtc;
1573 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1574 	/* Number of queues per enabled TC */
1575 	/* In MFP case we can have a much lower count of MSIx
1576 	 * vectors available and so we need to lower the used
1577 	 * q count.
1578 	 */
1579 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1580 		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1581 	else
1582 		qcount = vsi->alloc_queue_pairs;
1583 	num_tc_qps = qcount / numtc;
1584 	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1585 
1586 	/* Setup queue offset/count for all TCs for given VSI */
1587 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1588 		/* See if the given TC is enabled for the given VSI */
1589 		if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
1590 			/* TC is enabled */
1591 			int pow, num_qps;
1592 
1593 			switch (vsi->type) {
1594 			case I40E_VSI_MAIN:
1595 				qcount = min_t(int, pf->rss_size, num_tc_qps);
1596 				break;
1597 #ifdef I40E_FCOE
1598 			case I40E_VSI_FCOE:
1599 				qcount = num_tc_qps;
1600 				break;
1601 #endif
1602 			case I40E_VSI_FDIR:
1603 			case I40E_VSI_SRIOV:
1604 			case I40E_VSI_VMDQ2:
1605 			default:
1606 				qcount = num_tc_qps;
1607 				WARN_ON(i != 0);
1608 				break;
1609 			}
1610 			vsi->tc_config.tc_info[i].qoffset = offset;
1611 			vsi->tc_config.tc_info[i].qcount = qcount;
1612 
1613 			/* find the next higher power-of-2 of num queue pairs */
1614 			num_qps = qcount;
1615 			pow = 0;
1616 			while (num_qps && (BIT_ULL(pow) < qcount)) {
1617 				pow++;
1618 				num_qps >>= 1;
1619 			}
1620 
1621 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
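			/* qmap holds the queue offset and power-of-2 queue count for this TC */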
1622 			qmap =
1623 			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1624 			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1625 
1626 			offset += qcount;
1627 		} else {
1628 			/* TC is not enabled so set the offset to
1629 			 * default queue and allocate one queue
1630 			 * for the given TC.
1631 			 */
1632 			vsi->tc_config.tc_info[i].qoffset = 0;
1633 			vsi->tc_config.tc_info[i].qcount = 1;
1634 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1635 
1636 			qmap = 0;
1637 		}
1638 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1639 	}
1640 
1641 	/* Set actual Tx/Rx queue pairs */
1642 	vsi->num_queue_pairs = offset;
1643 	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1644 		if (vsi->req_queue_pairs > 0)
1645 			vsi->num_queue_pairs = vsi->req_queue_pairs;
1646 		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1647 			vsi->num_queue_pairs = pf->num_lan_msix;
1648 	}
1649 
1650 	/* Scheduler section valid can only be set for ADD VSI */
1651 	if (is_add) {
1652 		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1653 
1654 		ctxt->info.up_enable_bits = enabled_tc;
1655 	}
1656 	if (vsi->type == I40E_VSI_SRIOV) {
1657 		ctxt->info.mapping_flags |=
1658 				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1659 		for (i = 0; i < vsi->num_queue_pairs; i++)
1660 			ctxt->info.queue_mapping[i] =
1661 					       cpu_to_le16(vsi->base_queue + i);
1662 	} else {
1663 		ctxt->info.mapping_flags |=
1664 					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1665 		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1666 	}
1667 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1668 }
1669 
1670 /**
1671  * i40e_set_rx_mode - NDO callback to set the netdev filters
1672  * @netdev: network interface device structure
1673  **/
1674 #ifdef I40E_FCOE
1675 void i40e_set_rx_mode(struct net_device *netdev)
1676 #else
1677 static void i40e_set_rx_mode(struct net_device *netdev)
1678 #endif
1679 {
1680 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1681 	struct i40e_mac_filter *f, *ftmp;
1682 	struct i40e_vsi *vsi = np->vsi;
1683 	struct netdev_hw_addr *uca;
1684 	struct netdev_hw_addr *mca;
1685 	struct netdev_hw_addr *ha;
1686 
1687 	/* add addr if not already in the filter list */
1688 	netdev_for_each_uc_addr(uca, netdev) {
1689 		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1690 			if (i40e_is_vsi_in_vlan(vsi))
1691 				i40e_put_mac_in_vlan(vsi, uca->addr,
1692 						     false, true);
1693 			else
1694 				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1695 						false, true);
1696 		}
1697 	}
1698 
1699 	netdev_for_each_mc_addr(mca, netdev) {
1700 		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1701 			if (i40e_is_vsi_in_vlan(vsi))
1702 				i40e_put_mac_in_vlan(vsi, mca->addr,
1703 						     false, true);
1704 			else
1705 				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1706 						false, true);
1707 		}
1708 	}
1709 
1710 	/* remove filter if not in netdev list */
1711 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1712 		bool found = false;
1713 
1714 		if (!f->is_netdev)
1715 			continue;
1716 
1717 		if (is_multicast_ether_addr(f->macaddr)) {
1718 			netdev_for_each_mc_addr(mca, netdev) {
1719 				if (ether_addr_equal(mca->addr, f->macaddr)) {
1720 					found = true;
1721 					break;
1722 				}
1723 			}
1724 		} else {
1725 			netdev_for_each_uc_addr(uca, netdev) {
1726 				if (ether_addr_equal(uca->addr, f->macaddr)) {
1727 					found = true;
1728 					break;
1729 				}
1730 			}
1731 
1732 			for_each_dev_addr(netdev, ha) {
1733 				if (ether_addr_equal(ha->addr, f->macaddr)) {
1734 					found = true;
1735 					break;
1736 				}
1737 			}
1738 		}
1739 		if (!found)
1740 			i40e_del_filter(
1741 			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1742 	}
1743 
1744 	/* check for other flag changes */
1745 	if (vsi->current_netdev_flags != vsi->netdev->flags) {
1746 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1747 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1748 	}
1749 }
1750 
1751 /**
1752  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1753  * @vsi: ptr to the VSI
1754  *
1755  * Push any outstanding VSI filter changes through the AdminQ.
1756  *
1757  * Returns 0 or error value
1758  **/
1759 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1760 {
1761 	struct i40e_mac_filter *f, *ftmp;
1762 	bool promisc_forced_on = false;
1763 	bool add_happened = false;
1764 	int filter_list_len = 0;
1765 	u32 changed_flags = 0;
1766 	i40e_status ret = 0;
1767 	struct i40e_pf *pf;
1768 	int num_add = 0;
1769 	int num_del = 0;
1770 	int aq_err = 0;
1771 	u16 cmd_flags;
1772 
1773 	/* empty array-typed pointers, to be kcalloc'd below */
1774 	struct i40e_aqc_add_macvlan_element_data *add_list;
1775 	struct i40e_aqc_remove_macvlan_element_data *del_list;
1776 
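	/* serialize against any other filter sync already in progress */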
1777 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1778 		usleep_range(1000, 2000);
1779 	pf = vsi->back;
1780 
1781 	if (vsi->netdev) {
1782 		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1783 		vsi->current_netdev_flags = vsi->netdev->flags;
1784 	}
1785 
1786 	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1787 		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1788 
1789 		filter_list_len = pf->hw.aq.asq_buf_size /
1790 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
1791 		del_list = kcalloc(filter_list_len,
1792 			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
1793 			    GFP_KERNEL);
1794 		if (!del_list) {
1795 			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}
1796 
1797 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1798 			if (!f->changed)
1799 				continue;
1800 
1801 			if (f->counter != 0)
1802 				continue;
1803 			f->changed = false;
1804 			cmd_flags = 0;
1805 
1806 			/* add to delete list */
1807 			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1808 			del_list[num_del].vlan_tag =
1809 				cpu_to_le16((u16)(f->vlan ==
1810 					    I40E_VLAN_ANY ? 0 : f->vlan));
1811 
1812 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1813 			del_list[num_del].flags = cmd_flags;
1814 			num_del++;
1815 
1816 			/* unlink from filter list */
1817 			list_del(&f->list);
1818 			kfree(f);
1819 
1820 			/* flush a full buffer */
1821 			if (num_del == filter_list_len) {
1822 				ret = i40e_aq_remove_macvlan(&pf->hw,
1823 						  vsi->seid, del_list, num_del,
1824 						  NULL);
1825 				aq_err = pf->hw.aq.asq_last_status;
1826 				num_del = 0;
1827 				memset(del_list, 0,
				       filter_list_len * sizeof(*del_list));
1828 
1829 				if (ret && aq_err != I40E_AQ_RC_ENOENT)
1830 					dev_info(&pf->pdev->dev,
1831 						 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
1832 						 i40e_stat_str(&pf->hw, ret),
1833 						 i40e_aq_str(&pf->hw, aq_err));
1834 			}
1835 		}
1836 		if (num_del) {
1837 			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1838 						     del_list, num_del, NULL);
1839 			aq_err = pf->hw.aq.asq_last_status;
1840 			num_del = 0;
1841 
1842 			if (ret && aq_err != I40E_AQ_RC_ENOENT)
1843 				dev_info(&pf->pdev->dev,
1844 					 "ignoring delete macvlan error, err %s aq_err %s\n",
1845 					 i40e_stat_str(&pf->hw, ret),
1846 					 i40e_aq_str(&pf->hw, aq_err));
1847 		}
1848 
1849 		kfree(del_list);
1850 		del_list = NULL;
1851 
1852 		/* do all the adds now */
1853 		filter_list_len = pf->hw.aq.asq_buf_size /
1854 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
1855 		add_list = kcalloc(filter_list_len,
1856 			       sizeof(struct i40e_aqc_add_macvlan_element_data),
1857 			       GFP_KERNEL);
1858 		if (!add_list) {
1859 			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}
1860 
1861 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1862 			if (!f->changed)
1863 				continue;
1864 
1865 			if (f->counter == 0)
1866 				continue;
1867 			f->changed = false;
1868 			add_happened = true;
1869 			cmd_flags = 0;
1870 
1871 			/* add to add array */
1872 			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1873 			add_list[num_add].vlan_tag =
1874 				cpu_to_le16(
1875 				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1876 			add_list[num_add].queue_number = 0;
1877 
1878 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1879 			add_list[num_add].flags = cpu_to_le16(cmd_flags);
1880 			num_add++;
1881 
1882 			/* flush a full buffer */
1883 			if (num_add == filter_list_len) {
1884 				ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1885 							  add_list, num_add,
1886 							  NULL);
1887 				aq_err = pf->hw.aq.asq_last_status;
1888 				num_add = 0;
1889 
1890 				if (ret)
1891 					break;
1892 				memset(add_list, 0,
				       filter_list_len * sizeof(*add_list));
1893 			}
1894 		}
1895 		if (num_add) {
1896 			ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1897 						  add_list, num_add, NULL);
1898 			aq_err = pf->hw.aq.asq_last_status;
1899 			num_add = 0;
1900 		}
1901 		kfree(add_list);
1902 		add_list = NULL;
1903 
1904 		if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
1905 			dev_info(&pf->pdev->dev,
1906 				 "add filter failed, err %s aq_err %s\n",
1907 				 i40e_stat_str(&pf->hw, ret),
1908 				 i40e_aq_str(&pf->hw, aq_err));
1909 			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1910 			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1911 				      &vsi->state)) {
1912 				promisc_forced_on = true;
1913 				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1914 					&vsi->state);
1915 				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1916 			}
1917 		}
1918 	}
1919 
1920 	/* check for changes in promiscuous modes */
1921 	if (changed_flags & IFF_ALLMULTI) {
1922 		bool cur_multipromisc;
1923 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1924 		ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1925 							    vsi->seid,
1926 							    cur_multipromisc,
1927 							    NULL);
1928 		if (ret)
1929 			dev_info(&pf->pdev->dev,
1930 				 "set multi promisc failed, err %s aq_err %s\n",
1931 				 i40e_stat_str(&pf->hw, ret),
1932 				 i40e_aq_str(&pf->hw,
1933 					     pf->hw.aq.asq_last_status));
1934 	}
1935 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1936 		bool cur_promisc;
1937 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1938 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1939 					&vsi->state));
1940 		if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
1941 			/* set defport ON for Main VSI instead of true promisc;
1942 			 * this way we will get all unicast/multicast and VLAN
1943 			 * promisc behavior but will not get VF or VMDq traffic
1944 			 * replicated on the Main VSI.
1945 			 */
1946 			if (pf->cur_promisc != cur_promisc) {
1947 				pf->cur_promisc = cur_promisc;
1948 				i40e_do_reset_safe(pf,
1949 						BIT(__I40E_PF_RESET_REQUESTED));
1950 			}
1951 		} else {
1952 			ret = i40e_aq_set_vsi_unicast_promiscuous(
1953 							  &vsi->back->hw,
1954 							  vsi->seid,
1955 							  cur_promisc, NULL);
1956 			if (ret)
1957 				dev_info(&pf->pdev->dev,
1958 					 "set unicast promisc failed, err %d, aq_err %d\n",
1959 					 ret, pf->hw.aq.asq_last_status);
1960 			ret = i40e_aq_set_vsi_multicast_promiscuous(
1961 							  &vsi->back->hw,
1962 							  vsi->seid,
1963 							  cur_promisc, NULL);
1964 			if (ret)
1965 				dev_info(&pf->pdev->dev,
1966 					 "set multicast promisc failed, err %d, aq_err %d\n",
1967 					 ret, pf->hw.aq.asq_last_status);
1968 		}
1969 		ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1970 						vsi->seid,
1971 						cur_promisc, NULL);
1972 		if (ret)
1973 			dev_info(&pf->pdev->dev,
1974 				 "set brdcast promisc failed, err %s, aq_err %s\n",
1975 				 i40e_stat_str(&pf->hw, ret),
1976 				 i40e_aq_str(&pf->hw,
1977 					     pf->hw.aq.asq_last_status));
1978 	}
1979 
1980 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1981 	return 0;
1982 }
1983 
1984 /**
1985  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1986  * @pf: board private structure
1987  **/
1988 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1989 {
1990 	int v;
1991 
1992 	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1993 		return;
1994 	pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1995 
1996 	for (v = 0; v < pf->num_alloc_vsi; v++) {
1997 		if (pf->vsi[v] &&
1998 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1999 			i40e_sync_vsi_filters(pf->vsi[v]);
2000 	}
2001 }
2002 
2003 /**
2004  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2005  * @netdev: network interface device structure
2006  * @new_mtu: new value for maximum frame size
2007  *
2008  * Returns 0 on success, negative on failure
2009  **/
2010 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2011 {
2012 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2013 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2014 	struct i40e_vsi *vsi = np->vsi;
2015 
2016 	/* MTU < 68 is an error and causes problems on some kernels */
2017 	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
2018 		return -EINVAL;
2019 
2020 	netdev_info(netdev, "changing MTU from %d to %d\n",
2021 		    netdev->mtu, new_mtu);
2022 	netdev->mtu = new_mtu;
2023 	if (netif_running(netdev))
2024 		i40e_vsi_reinit_locked(vsi);
2025 
2026 	return 0;
2027 }
2028 
2029 /**
2030  * i40e_ioctl - Access the hwtstamp interface
2031  * @netdev: network interface device structure
2032  * @ifr: interface request data
2033  * @cmd: ioctl command
2034  **/
2035 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2036 {
2037 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2038 	struct i40e_pf *pf = np->vsi->back;
2039 
2040 	switch (cmd) {
2041 	case SIOCGHWTSTAMP:
2042 		return i40e_ptp_get_ts_config(pf, ifr);
2043 	case SIOCSHWTSTAMP:
2044 		return i40e_ptp_set_ts_config(pf, ifr);
2045 	default:
2046 		return -EOPNOTSUPP;
2047 	}
2048 }
2049 
2050 /**
2051  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2052  * @vsi: the vsi being adjusted
2053  **/
2054 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2055 {
2056 	struct i40e_vsi_context ctxt;
2057 	i40e_status ret;
2058 
2059 	if ((vsi->info.valid_sections &
2060 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2061 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2062 		return;  /* already enabled */
2063 
2064 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2065 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2066 				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2067 
2068 	ctxt.seid = vsi->seid;
2069 	ctxt.info = vsi->info;
2070 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2071 	if (ret) {
2072 		dev_info(&vsi->back->pdev->dev,
2073 			 "update vlan stripping failed, err %s aq_err %s\n",
2074 			 i40e_stat_str(&vsi->back->hw, ret),
2075 			 i40e_aq_str(&vsi->back->hw,
2076 				     vsi->back->hw.aq.asq_last_status));
2077 	}
2078 }
2079 
2080 /**
2081  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2082  * @vsi: the vsi being adjusted
2083  **/
2084 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2085 {
2086 	struct i40e_vsi_context ctxt;
2087 	i40e_status ret;
2088 
2089 	if ((vsi->info.valid_sections &
2090 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2091 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2092 	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
2093 		return;  /* already disabled */
2094 
2095 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2096 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2097 				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2098 
2099 	ctxt.seid = vsi->seid;
2100 	ctxt.info = vsi->info;
2101 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2102 	if (ret) {
2103 		dev_info(&vsi->back->pdev->dev,
2104 			 "update vlan stripping failed, err %s aq_err %s\n",
2105 			 i40e_stat_str(&vsi->back->hw, ret),
2106 			 i40e_aq_str(&vsi->back->hw,
2107 				     vsi->back->hw.aq.asq_last_status));
2108 	}
2109 }
2110 
2111 /**
2112  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2113  * @netdev: network interface to be adjusted
2114  * @features: netdev features to test if VLAN offload is enabled or not
2115  **/
2116 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2117 {
2118 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2119 	struct i40e_vsi *vsi = np->vsi;
2120 
2121 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2122 		i40e_vlan_stripping_enable(vsi);
2123 	else
2124 		i40e_vlan_stripping_disable(vsi);
2125 }
2126 
2127 /**
2128  * i40e_vsi_add_vlan - Add vsi membership for given vlan
2129  * @vsi: the vsi being configured
2130  * @vid: vlan id to be added (0 = untagged only, -1 = any)
2131  **/
2132 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2133 {
2134 	struct i40e_mac_filter *f, *add_f;
2135 	bool is_netdev, is_vf;
2136 
2137 	is_vf = (vsi->type == I40E_VSI_SRIOV);
2138 	is_netdev = !!(vsi->netdev);
2139 
2140 	if (is_netdev) {
2141 		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2142 					is_vf, is_netdev);
2143 		if (!add_f) {
2144 			dev_info(&vsi->back->pdev->dev,
2145 				 "Could not add vlan filter %d for %pM\n",
2146 				 vid, vsi->netdev->dev_addr);
2147 			return -ENOMEM;
2148 		}
2149 	}
2150 
2151 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
2152 		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2153 		if (!add_f) {
2154 			dev_info(&vsi->back->pdev->dev,
2155 				 "Could not add vlan filter %d for %pM\n",
2156 				 vid, f->macaddr);
2157 			return -ENOMEM;
2158 		}
2159 	}
2160 
2161 	/* Now that we are adding a vlan tag, check whether this is the
2162 	 * first tag (i.e. an "any" (-1) filter exists) and, if so, replace
2163 	 * the -1 filter with a 0 filter, so we accept untagged plus the
2164 	 * specified tagged traffic (and not any tagged and untagged)
2165 	 */
2166 	if (vid > 0) {
2167 		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2168 						  I40E_VLAN_ANY,
2169 						  is_vf, is_netdev)) {
2170 			i40e_del_filter(vsi, vsi->netdev->dev_addr,
2171 					I40E_VLAN_ANY, is_vf, is_netdev);
2172 			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2173 						is_vf, is_netdev);
2174 			if (!add_f) {
2175 				dev_info(&vsi->back->pdev->dev,
2176 					 "Could not add filter 0 for %pM\n",
2177 					 vsi->netdev->dev_addr);
2178 				return -ENOMEM;
2179 			}
2180 		}
2181 	}
2182 
2183 	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2184 	if (vid > 0 && !vsi->info.pvid) {
2185 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
2186 			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2187 					     is_vf, is_netdev)) {
2188 				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2189 						is_vf, is_netdev);
2190 				add_f = i40e_add_filter(vsi, f->macaddr,
2191 							0, is_vf, is_netdev);
2192 				if (!add_f) {
2193 					dev_info(&vsi->back->pdev->dev,
2194 						 "Could not add filter 0 for %pM\n",
2195 						 f->macaddr);
2196 					return -ENOMEM;
2197 				}
2198 			}
2199 		}
2200 	}
2201 
2202 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2203 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2204 		return 0;
2205 
2206 	return i40e_sync_vsi_filters(vsi);
2207 }
2208 
2209 /**
2210  * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2211  * @vsi: the vsi being configured
2212  * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2213  *
2214  * Return: 0 on success or negative otherwise
2215  **/
2216 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2217 {
2218 	struct net_device *netdev = vsi->netdev;
2219 	struct i40e_mac_filter *f, *add_f;
2220 	bool is_vf, is_netdev;
2221 	int filter_count = 0;
2222 
2223 	is_vf = (vsi->type == I40E_VSI_SRIOV);
2224 	is_netdev = !!(netdev);
2225 
2226 	if (is_netdev)
2227 		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2228 
2229 	list_for_each_entry(f, &vsi->mac_filter_list, list)
2230 		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2231 
2232 	/* go through all the filters for this VSI and if only vid == 0
2233 	 * filters remain it means there are no other vlan filters, so the
2234 	 * vid 0 filters must be replaced with -1. This signifies that we
2235 	 * should from now on accept any traffic (with any tag, or untagged)
2236 	 */
2237 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
2238 		if (is_netdev) {
2239 			if (f->vlan &&
2240 			    ether_addr_equal(netdev->dev_addr, f->macaddr))
2241 				filter_count++;
2242 		}
2243 
2244 		if (f->vlan)
2245 			filter_count++;
2246 	}
2247 
2248 	if (!filter_count && is_netdev) {
2249 		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2250 		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2251 				    is_vf, is_netdev);
2252 		if (!f) {
2253 			dev_info(&vsi->back->pdev->dev,
2254 				 "Could not add filter %d for %pM\n",
2255 				 I40E_VLAN_ANY, netdev->dev_addr);
2256 			return -ENOMEM;
2257 		}
2258 	}
2259 
2260 	if (!filter_count) {
2261 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
2262 			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2263 			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2264 					    is_vf, is_netdev);
2265 			if (!add_f) {
2266 				dev_info(&vsi->back->pdev->dev,
2267 					 "Could not add filter %d for %pM\n",
2268 					 I40E_VLAN_ANY, f->macaddr);
2269 				return -ENOMEM;
2270 			}
2271 		}
2272 	}
2273 
2274 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2275 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2276 		return 0;
2277 
2278 	return i40e_sync_vsi_filters(vsi);
2279 }
2280 
2281 /**
2282  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2283  * @netdev: network interface to be adjusted
2284  * @vid: vlan id to be added
2285  *
2286  * net_device_ops implementation for adding vlan ids
2287  **/
2288 #ifdef I40E_FCOE
2289 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2290 			 __always_unused __be16 proto, u16 vid)
2291 #else
2292 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2293 				__always_unused __be16 proto, u16 vid)
2294 #endif
2295 {
2296 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2297 	struct i40e_vsi *vsi = np->vsi;
2298 	int ret = 0;
2299 
2300 	if (vid > 4095)
2301 		return -EINVAL;
2302 
2303 	netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2304 
2305 	/* If the network stack called us with vid = 0 then
2306 	 * it is asking to receive priority tagged packets with
2307 	 * vlan id 0.  Our HW receives them by default when configured
2308 	 * to receive untagged packets so there is no need to add an
2309 	 * extra filter for vlan 0 tagged packets.
2310 	 */
2311 	if (vid)
2312 		ret = i40e_vsi_add_vlan(vsi, vid);
2313 
2314 	if (!ret && (vid < VLAN_N_VID))
2315 		set_bit(vid, vsi->active_vlans);
2316 
2317 	return ret;
2318 }
2319 
2320 /**
2321  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2322  * @netdev: network interface to be adjusted
2323  * @vid: vlan id to be removed
2324  *
2325  * net_device_ops implementation for removing vlan ids
2326  **/
2327 #ifdef I40E_FCOE
2328 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2329 			  __always_unused __be16 proto, u16 vid)
2330 #else
2331 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2332 				 __always_unused __be16 proto, u16 vid)
2333 #endif
2334 {
2335 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2336 	struct i40e_vsi *vsi = np->vsi;
2337 
2338 	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2339 
2340 	/* return code is ignored as there is nothing a user
2341 	 * can do about a failure to remove, and a log message
2342 	 * has already been printed by the called function
2343 	 */
2344 	i40e_vsi_kill_vlan(vsi, vid);
2345 
2346 	clear_bit(vid, vsi->active_vlans);
2347 
2348 	return 0;
2349 }
2350 
2351 /**
2352  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2353  * @vsi: the vsi being brought back up
2354  **/
2355 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2356 {
2357 	u16 vid;
2358 
2359 	if (!vsi->netdev)
2360 		return;
2361 
2362 	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2363 
2364 	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2365 		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2366 				     vid);
2367 }
2368 
2369 /**
2370  * i40e_vsi_add_pvid - Add pvid for the VSI
2371  * @vsi: the vsi being adjusted
2372  * @vid: the vlan id to set as a PVID
2373  **/
2374 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2375 {
2376 	struct i40e_vsi_context ctxt;
2377 	i40e_status ret;
2378 
2379 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2380 	vsi->info.pvid = cpu_to_le16(vid);
2381 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2382 				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
2383 				    I40E_AQ_VSI_PVLAN_EMOD_STR;
2384 
2385 	ctxt.seid = vsi->seid;
2386 	ctxt.info = vsi->info;
2387 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2388 	if (ret) {
2389 		dev_info(&vsi->back->pdev->dev,
2390 			 "add pvid failed, err %s aq_err %s\n",
2391 			 i40e_stat_str(&vsi->back->hw, ret),
2392 			 i40e_aq_str(&vsi->back->hw,
2393 				     vsi->back->hw.aq.asq_last_status));
2394 		return -ENOENT;
2395 	}
2396 
2397 	return 0;
2398 }
2399 
2400 /**
2401  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2402  * @vsi: the vsi being adjusted
2403  *
2404  * Just disable VLAN stripping again to put the VSI back to normal
2405  **/
2406 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2407 {
2408 	i40e_vlan_stripping_disable(vsi);
2409 
2410 	vsi->info.pvid = 0;
2411 }
2412 
2413 /**
2414  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2415  * @vsi: ptr to the VSI
2416  *
2417  * If this function returns with an error, then it's possible one or
2418  * more of the rings is populated (while the rest are not).  It is the
2419  * caller's duty to clean those orphaned rings.
2420  *
2421  * Return 0 on success, negative on failure
2422  **/
2423 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2424 {
2425 	int i, err = 0;
2426 
2427 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2428 		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2429 
2430 	return err;
2431 }
2432 
2433 /**
2434  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2435  * @vsi: ptr to the VSI
2436  *
2437  * Free VSI's transmit software resources
2438  **/
2439 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2440 {
2441 	int i;
2442 
2443 	if (!vsi->tx_rings)
2444 		return;
2445 
2446 	for (i = 0; i < vsi->num_queue_pairs; i++)
2447 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2448 			i40e_free_tx_resources(vsi->tx_rings[i]);
2449 }
2450 
2451 /**
2452  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2453  * @vsi: ptr to the VSI
2454  *
2455  * If this function returns with an error, then it's possible one or
2456  * more of the rings is populated (while the rest are not).  It is the
2457  * caller's duty to clean those orphaned rings.
2458  *
2459  * Return 0 on success, negative on failure
2460  **/
2461 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2462 {
2463 	int i, err = 0;
2464 
2465 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2466 		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2467 #ifdef I40E_FCOE
2468 	i40e_fcoe_setup_ddp_resources(vsi);
2469 #endif
2470 	return err;
2471 }
2472 
2473 /**
2474  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2475  * @vsi: ptr to the VSI
2476  *
2477  * Free all receive software resources
2478  **/
2479 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2480 {
2481 	int i;
2482 
2483 	if (!vsi->rx_rings)
2484 		return;
2485 
2486 	for (i = 0; i < vsi->num_queue_pairs; i++)
2487 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2488 			i40e_free_rx_resources(vsi->rx_rings[i]);
2489 #ifdef I40E_FCOE
2490 	i40e_fcoe_free_ddp_resources(vsi);
2491 #endif
2492 }
2493 
2494 /**
2495  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2496  * @ring: The Tx ring to configure
2497  *
2498  * This enables/disables XPS for a given Tx descriptor ring
2499  * based on the TCs enabled for the VSI that ring belongs to.
2500  **/
2501 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2502 {
2503 	struct i40e_vsi *vsi = ring->vsi;
2504 	cpumask_var_t mask;
2505 
2506 	if (!ring->q_vector || !ring->netdev)
2507 		return;
2508 
2509 	/* Single TC mode enable XPS */
2510 	if (vsi->tc_config.numtc <= 1) {
2511 		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2512 			netif_set_xps_queue(ring->netdev,
2513 					    &ring->q_vector->affinity_mask,
2514 					    ring->queue_index);
2515 	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2516 		/* Disable XPS to allow selection based on TC */
2517 		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2518 		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2519 		free_cpumask_var(mask);
2520 	}
2521 }
2522 
2523 /**
2524  * i40e_configure_tx_ring - Configure a transmit ring context
2525  * @ring: The Tx ring to configure
2526  *
2527  * Configure the Tx descriptor ring in the HMC context.
2528  **/
2529 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2530 {
2531 	struct i40e_vsi *vsi = ring->vsi;
2532 	u16 pf_q = vsi->base_queue + ring->queue_index;
2533 	struct i40e_hw *hw = &vsi->back->hw;
2534 	struct i40e_hmc_obj_txq tx_ctx;
2535 	i40e_status err = 0;
2536 	u32 qtx_ctl = 0;
2537 
2538 	/* some ATR related tx ring init */
2539 	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2540 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
2541 		ring->atr_count = 0;
2542 	} else {
2543 		ring->atr_sample_rate = 0;
2544 	}
2545 
2546 	/* configure XPS */
2547 	i40e_config_xps_tx_ring(ring);
2548 
2549 	/* clear the context structure first */
2550 	memset(&tx_ctx, 0, sizeof(tx_ctx));
2551 
2552 	tx_ctx.new_context = 1;
2553 	tx_ctx.base = (ring->dma / 128);
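	/* the HMC context takes the ring base address in 128-byte units,
	 * hence the divide by 128
	 */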
2554 	tx_ctx.qlen = ring->count;
2555 	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2556 					       I40E_FLAG_FD_ATR_ENABLED));
2557 #ifdef I40E_FCOE
2558 	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2559 #endif
2560 	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2561 	/* FDIR VSI tx ring can still use RS bit and writebacks */
2562 	if (vsi->type != I40E_VSI_FDIR)
2563 		tx_ctx.head_wb_ena = 1;
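	/* head write-back, when enabled, lands just past the last descriptor */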
2564 	tx_ctx.head_wb_addr = ring->dma +
2565 			      (ring->count * sizeof(struct i40e_tx_desc));
2566 
2567 	/* As part of VSI creation/update, FW allocates certain
2568 	 * Tx arbitration queue sets for each TC enabled for
2569 	 * the VSI. The FW returns the handles to these queue
2570 	 * sets as part of the response buffer to Add VSI,
2571 	 * Update VSI, etc. AQ commands. It is expected that
2572 	 * these queue set handles be associated with the Tx
2573 	 * queues by the driver as part of the TX queue context
2574 	 * initialization. This has to be done regardless of
2575 	 * DCB as by default everything is mapped to TC0.
2576 	 */
2577 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2578 	tx_ctx.rdylist_act = 0;
2579 
2580 	/* clear the context in the HMC */
2581 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2582 	if (err) {
2583 		dev_info(&vsi->back->pdev->dev,
2584 			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2585 			 ring->queue_index, pf_q, err);
2586 		return -ENOMEM;
2587 	}
2588 
2589 	/* set the context in the HMC */
2590 	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2591 	if (err) {
2592 		dev_info(&vsi->back->pdev->dev,
2593 			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2594 			 ring->queue_index, pf_q, err);
2595 		return -ENOMEM;
2596 	}
2597 
2598 	/* Now associate this queue with this PCI function */
2599 	if (vsi->type == I40E_VSI_VMDQ2) {
2600 		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2601 		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2602 			   I40E_QTX_CTL_VFVM_INDX_MASK;
2603 	} else {
2604 		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2605 	}
2606 
2607 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2608 		    I40E_QTX_CTL_PF_INDX_MASK);
2609 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2610 	i40e_flush(hw);
2611 
2612 	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2613 
2614 	/* cache tail off for easier writes later */
2615 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2616 
2617 	return 0;
2618 }
2619 
2620 /**
2621  * i40e_configure_rx_ring - Configure a receive ring context
2622  * @ring: The Rx ring to configure
2623  *
2624  * Configure the Rx descriptor ring in the HMC context.
2625  **/
2626 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2627 {
2628 	struct i40e_vsi *vsi = ring->vsi;
2629 	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2630 	u16 pf_q = vsi->base_queue + ring->queue_index;
2631 	struct i40e_hw *hw = &vsi->back->hw;
2632 	struct i40e_hmc_obj_rxq rx_ctx;
2633 	i40e_status err = 0;
2634 
2635 	ring->state = 0;
2636 
2637 	/* clear the context structure first */
2638 	memset(&rx_ctx, 0, sizeof(rx_ctx));
2639 
2640 	ring->rx_buf_len = vsi->rx_buf_len;
2641 	ring->rx_hdr_len = vsi->rx_hdr_len;
2642 
2643 	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2644 	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
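	/* the DBUFF/HBUFF shifts express the buffer lengths in the HW's
	 * chunk granularity (128- and 64-byte units respectively)
	 */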
2645 
2646 	rx_ctx.base = (ring->dma / 128);
2647 	rx_ctx.qlen = ring->count;
2648 
2649 	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2650 		set_ring_16byte_desc_enabled(ring);
2651 		rx_ctx.dsize = 0;
2652 	} else {
2653 		rx_ctx.dsize = 1;
2654 	}
2655 
2656 	rx_ctx.dtype = vsi->dtype;
2657 	if (vsi->dtype) {
2658 		set_ring_ps_enabled(ring);
2659 		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
2660 				  I40E_RX_SPLIT_IP      |
2661 				  I40E_RX_SPLIT_TCP_UDP |
2662 				  I40E_RX_SPLIT_SCTP;
2663 	} else {
2664 		rx_ctx.hsplit_0 = 0;
2665 	}
2666 
2667 	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2668 				  (chain_len * ring->rx_buf_len));
2669 	if (hw->revision_id == 0)
2670 		rx_ctx.lrxqthresh = 0;
2671 	else
2672 		rx_ctx.lrxqthresh = 2;
2673 	rx_ctx.crcstrip = 1;
2674 	rx_ctx.l2tsel = 1;
2675 	/* this controls whether VLAN is stripped from inner headers */
2676 	rx_ctx.showiv = 0;
2677 #ifdef I40E_FCOE
2678 	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2679 #endif
2680 	/* set the prefena field to 1 because the manual says to */
2681 	rx_ctx.prefena = 1;
2682 
2683 	/* clear the context in the HMC */
2684 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2685 	if (err) {
2686 		dev_info(&vsi->back->pdev->dev,
2687 			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2688 			 ring->queue_index, pf_q, err);
2689 		return -ENOMEM;
2690 	}
2691 
2692 	/* set the context in the HMC */
2693 	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2694 	if (err) {
2695 		dev_info(&vsi->back->pdev->dev,
2696 			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2697 			 ring->queue_index, pf_q, err);
2698 		return -ENOMEM;
2699 	}
2700 
2701 	/* cache tail for quicker writes, and clear the reg before use */
2702 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2703 	writel(0, ring->tail);
2704 
2705 	if (ring_is_ps_enabled(ring)) {
2706 		i40e_alloc_rx_headers(ring);
2707 		i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
2708 	} else {
2709 		i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
2710 	}
2711 
2712 	return 0;
2713 }
2714 
2715 /**
2716  * i40e_vsi_configure_tx - Configure the VSI for Tx
2717  * @vsi: VSI structure describing this set of rings and resources
2718  *
2719  * Configure the Tx VSI for operation.
2720  **/
2721 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2722 {
2723 	int err = 0;
2724 	u16 i;
2725 
2726 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2727 		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2728 
2729 	return err;
2730 }
2731 
2732 /**
2733  * i40e_vsi_configure_rx - Configure the VSI for Rx
2734  * @vsi: the VSI being configured
2735  *
2736  * Configure the Rx VSI for operation.
2737  **/
2738 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2739 {
2740 	int err = 0;
2741 	u16 i;
2742 
2743 	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2744 		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2745 			       + ETH_FCS_LEN + VLAN_HLEN;
2746 	else
2747 		vsi->max_frame = I40E_RXBUFFER_2048;
2748 
2749 	/* figure out correct receive buffer length */
2750 	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2751 				    I40E_FLAG_RX_PS_ENABLED)) {
2752 	case I40E_FLAG_RX_1BUF_ENABLED:
2753 		vsi->rx_hdr_len = 0;
2754 		vsi->rx_buf_len = vsi->max_frame;
2755 		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2756 		break;
2757 	case I40E_FLAG_RX_PS_ENABLED:
2758 		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2759 		vsi->rx_buf_len = I40E_RXBUFFER_2048;
2760 		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2761 		break;
2762 	default:
2763 		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2764 		vsi->rx_buf_len = I40E_RXBUFFER_2048;
2765 		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2766 		break;
2767 	}
2768 
2769 #ifdef I40E_FCOE
2770 	/* setup rx buffer for FCoE */
2771 	if ((vsi->type == I40E_VSI_FCOE) &&
2772 	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2773 		vsi->rx_hdr_len = 0;
2774 		vsi->rx_buf_len = I40E_RXBUFFER_3072;
2775 		vsi->max_frame = I40E_RXBUFFER_3072;
2776 		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2777 	}
2778 
2779 #endif /* I40E_FCOE */
2780 	/* round up for the chip's needs */
2781 	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2782 				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
2783 	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2784 				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
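	/* i.e. header buffers in 64-byte steps, data buffers in 128-byte steps */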
2785 
2786 	/* set up individual rings */
2787 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2788 		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2789 
2790 	return err;
2791 }
2792 
2793 /**
2794  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2795  * @vsi: ptr to the VSI
2796  **/
2797 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2798 {
2799 	struct i40e_ring *tx_ring, *rx_ring;
2800 	u16 qoffset, qcount;
2801 	int i, n;
2802 
2803 	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2804 		/* Reset the TC information */
2805 		for (i = 0; i < vsi->num_queue_pairs; i++) {
2806 			rx_ring = vsi->rx_rings[i];
2807 			tx_ring = vsi->tx_rings[i];
2808 			rx_ring->dcb_tc = 0;
2809 			tx_ring->dcb_tc = 0;
2810 		}
2811 	}
2812 
2813 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2814 		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
2815 			continue;
2816 
2817 		qoffset = vsi->tc_config.tc_info[n].qoffset;
2818 		qcount = vsi->tc_config.tc_info[n].qcount;
2819 		for (i = qoffset; i < (qoffset + qcount); i++) {
2820 			rx_ring = vsi->rx_rings[i];
2821 			tx_ring = vsi->tx_rings[i];
2822 			rx_ring->dcb_tc = n;
2823 			tx_ring->dcb_tc = n;
2824 		}
2825 	}
2826 }
2827 
2828 /**
2829  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2830  * @vsi: ptr to the VSI
2831  **/
2832 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2833 {
2834 	if (vsi->netdev)
2835 		i40e_set_rx_mode(vsi->netdev);
2836 }
2837 
2838 /**
2839  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2840  * @vsi: Pointer to the targeted VSI
2841  *
2842  * This function replays the hlist in which all the SB Flow Director
2843  * filters were saved back onto the hw.
2844  **/
2845 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2846 {
2847 	struct i40e_fdir_filter *filter;
2848 	struct i40e_pf *pf = vsi->back;
2849 	struct hlist_node *node;
2850 
2851 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2852 		return;
2853 
2854 	hlist_for_each_entry_safe(filter, node,
2855 				  &pf->fdir_filter_list, fdir_node) {
2856 		i40e_add_del_fdir(vsi, filter, true);
2857 	}
2858 }
2859 
2860 /**
2861  * i40e_vsi_configure - Set up the VSI for action
2862  * @vsi: the VSI being configured
2863  **/
2864 static int i40e_vsi_configure(struct i40e_vsi *vsi)
2865 {
2866 	int err;
2867 
2868 	i40e_set_vsi_rx_mode(vsi);
2869 	i40e_restore_vlan(vsi);
2870 	i40e_vsi_config_dcb_rings(vsi);
2871 	err = i40e_vsi_configure_tx(vsi);
2872 	if (!err)
2873 		err = i40e_vsi_configure_rx(vsi);
2874 
2875 	return err;
2876 }
2877 
2878 /**
2879  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2880  * @vsi: the VSI being configured
2881  **/
2882 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2883 {
2884 	struct i40e_pf *pf = vsi->back;
2885 	struct i40e_q_vector *q_vector;
2886 	struct i40e_hw *hw = &pf->hw;
2887 	u16 vector;
2888 	int i, q;
2889 	u32 val;
2890 	u32 qp;
2891 
2892 	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
2893 	 * and PFINT_LNKLSTn registers, e.g.:
2894 	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
2895 	 */
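	/* e.g. a VSI whose base_vector is 1 programs index 0 of these
	 * register arrays for its first q_vector (the "vector - 1" below)
	 */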
2896 	qp = vsi->base_queue;
2897 	vector = vsi->base_vector;
2898 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2899 		q_vector = vsi->q_vectors[i];
2900 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2901 		q_vector->rx.latency_range = I40E_LOW_LATENCY;
2902 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2903 		     q_vector->rx.itr);
2904 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2905 		q_vector->tx.latency_range = I40E_LOW_LATENCY;
2906 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2907 		     q_vector->tx.itr);
2908 
2909 		/* Linked list for the queuepairs assigned to this vector */
2910 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2911 		for (q = 0; q < q_vector->num_ringpairs; q++) {
2912 			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2913 			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
2914 			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2915 			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2916 			      (I40E_QUEUE_TYPE_TX
2917 				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2918 
2919 			wr32(hw, I40E_QINT_RQCTL(qp), val);
2920 
2921 			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2922 			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
2923 			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2924 			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2925 			      (I40E_QUEUE_TYPE_RX
2926 				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2927 
2928 			/* Terminate the linked list */
2929 			if (q == (q_vector->num_ringpairs - 1))
2930 				val |= (I40E_QUEUE_END_OF_LIST
2931 					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2932 
2933 			wr32(hw, I40E_QINT_TQCTL(qp), val);
2934 			qp++;
2935 		}
2936 	}
2937 
2938 	i40e_flush(hw);
2939 }
2940 
2941 /**
2942  * i40e_enable_misc_int_causes - enable the non-queue interrupts
2943  * @pf: board private structure
2944  **/
2945 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2946 {
2947 	struct i40e_hw *hw = &pf->hw;
2948 	u32 val;
2949 
2950 	/* clear things first */
2951 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2952 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2953 
2954 	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
2955 	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
2956 	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
2957 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2958 	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
2959 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
2960 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
2961 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2962 
2963 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
2964 		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
2965 
2966 	if (pf->flags & I40E_FLAG_PTP)
2967 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2968 
2969 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
2970 
2971 	/* SW_ITR_IDX = 0, but don't change INTENA */
2972 	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2973 					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2974 
2975 	/* OTHER_ITR_IDX = 0 */
2976 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2977 }
2978 
2979 /**
2980  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2981  * @vsi: the VSI being configured
2982  **/
2983 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2984 {
2985 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2986 	struct i40e_pf *pf = vsi->back;
2987 	struct i40e_hw *hw = &pf->hw;
2988 	u32 val;
2989 
2990 	/* set the ITR configuration */
2991 	q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2992 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
2993 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2994 	q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2995 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
2996 	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2997 
2998 	i40e_enable_misc_int_causes(pf);
2999 
3000 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3001 	wr32(hw, I40E_PFINT_LNKLST0, 0);
3002 
3003 	/* Associate the queue pair to the vector and enable the queue int */
3004 	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		      |
3005 	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3006 	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3007 
3008 	wr32(hw, I40E_QINT_RQCTL(0), val);
3009 
3010 	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
3011 	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3012 	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3013 
3014 	wr32(hw, I40E_QINT_TQCTL(0), val);
3015 	i40e_flush(hw);
3016 }
3017 
3018 /**
3019  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3020  * @pf: board private structure
3021  **/
3022 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3023 {
3024 	struct i40e_hw *hw = &pf->hw;
3025 
3026 	wr32(hw, I40E_PFINT_DYN_CTL0,
3027 	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3028 	i40e_flush(hw);
3029 }
3030 
3031 /**
3032  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3033  * @pf: board private structure
3034  **/
3035 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3036 {
3037 	struct i40e_hw *hw = &pf->hw;
3038 	u32 val;
3039 
3040 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3041 	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3042 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3043 
3044 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
3045 	i40e_flush(hw);
3046 }
3047 
3048 /**
3049  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
3050  * @vsi: pointer to a vsi
3051  * @vector: enable a particular Hw Interrupt vector
3052  **/
3053 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
3054 {
3055 	struct i40e_pf *pf = vsi->back;
3056 	struct i40e_hw *hw = &pf->hw;
3057 	u32 val;
3058 
3059 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3060 	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3061 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3062 	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3063 	/* skip the flush */
3064 }
3065 
3066 /**
3067  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
3068  * @vsi: pointer to a vsi
3069  * @vector: disable a particular Hw Interrupt vector
3070  **/
3071 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
3072 {
3073 	struct i40e_pf *pf = vsi->back;
3074 	struct i40e_hw *hw = &pf->hw;
3075 	u32 val;
3076 
3077 	val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3078 	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3079 	i40e_flush(hw);
3080 }
3081 
3082 /**
3083  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3084  * @irq: interrupt number
3085  * @data: pointer to a q_vector
3086  **/
3087 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3088 {
3089 	struct i40e_q_vector *q_vector = data;
3090 
3091 	if (!q_vector->tx.ring && !q_vector->rx.ring)
3092 		return IRQ_HANDLED;
3093 
3094 	napi_schedule(&q_vector->napi);
3095 
3096 	return IRQ_HANDLED;
3097 }
3098 
3099 /**
3100  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3101  * @vsi: the VSI being configured
3102  * @basename: name for the vector
3103  *
3104  * Allocates MSI-X vectors and requests interrupts from the kernel.
3105  **/
3106 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3107 {
3108 	int q_vectors = vsi->num_q_vectors;
3109 	struct i40e_pf *pf = vsi->back;
3110 	int base = vsi->base_vector;
3111 	int rx_int_idx = 0;
3112 	int tx_int_idx = 0;
3113 	int vector, err;
3114 
3115 	for (vector = 0; vector < q_vectors; vector++) {
3116 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3117 
3118 		if (q_vector->tx.ring && q_vector->rx.ring) {
3119 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3120 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3121 			tx_int_idx++;
3122 		} else if (q_vector->rx.ring) {
3123 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3124 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
3125 		} else if (q_vector->tx.ring) {
3126 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3127 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
3128 		} else {
3129 			/* skip this unused q_vector */
3130 			continue;
3131 		}
3132 		err = request_irq(pf->msix_entries[base + vector].vector,
3133 				  vsi->irq_handler,
3134 				  0,
3135 				  q_vector->name,
3136 				  q_vector);
3137 		if (err) {
3138 			dev_info(&pf->pdev->dev,
3139 				 "%s: request_irq failed, error: %d\n",
3140 				 __func__, err);
3141 			goto free_queue_irqs;
3142 		}
3143 		/* assign the mask for this irq */
3144 		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3145 				      &q_vector->affinity_mask);
3146 	}
3147 
3148 	vsi->irqs_ready = true;
3149 	return 0;
3150 
3151 free_queue_irqs:
3152 	while (vector) {
3153 		vector--;
3154 		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3155 				      NULL);
3156 		free_irq(pf->msix_entries[base + vector].vector,
3157 			 &(vsi->q_vectors[vector]));
3158 	}
3159 	return err;
3160 }
3161 
3162 /**
3163  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3164  * @vsi: the VSI being un-configured
3165  **/
3166 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3167 {
3168 	struct i40e_pf *pf = vsi->back;
3169 	struct i40e_hw *hw = &pf->hw;
3170 	int base = vsi->base_vector;
3171 	int i;
3172 
3173 	for (i = 0; i < vsi->num_queue_pairs; i++) {
3174 		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3175 		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3176 	}
3177 
3178 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3179 		for (i = vsi->base_vector;
3180 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3181 			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3182 
3183 		i40e_flush(hw);
3184 		for (i = 0; i < vsi->num_q_vectors; i++)
3185 			synchronize_irq(pf->msix_entries[i + base].vector);
3186 	} else {
3187 		/* Legacy and MSI mode - this stops all interrupt handling */
3188 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3189 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3190 		i40e_flush(hw);
3191 		synchronize_irq(pf->pdev->irq);
3192 	}
3193 }
3194 
3195 /**
3196  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3197  * @vsi: the VSI being configured
3198  **/
3199 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3200 {
3201 	struct i40e_pf *pf = vsi->back;
3202 	int i;
3203 
3204 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3205 		for (i = vsi->base_vector;
3206 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3207 			i40e_irq_dynamic_enable(vsi, i);
3208 	} else {
3209 		i40e_irq_dynamic_enable_icr0(pf);
3210 	}
3211 
3212 	i40e_flush(&pf->hw);
3213 	return 0;
3214 }
3215 
3216 /**
3217  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3218  * @pf: board private structure
3219  **/
3220 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3221 {
3222 	/* Disable ICR 0 */
3223 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3224 	i40e_flush(&pf->hw);
3225 }
3226 
3227 /**
3228  * i40e_intr - MSI/Legacy and non-queue interrupt handler
3229  * @irq: interrupt number
3230  * @data: pointer to the board private structure (struct i40e_pf)
3231  *
3232  * This is the handler used for all MSI/Legacy interrupts, and deals
3233  * with both queue and non-queue interrupts.  This is also used in
3234  * MSIX mode to handle the non-queue interrupts.
3235  **/
3236 static irqreturn_t i40e_intr(int irq, void *data)
3237 {
3238 	struct i40e_pf *pf = (struct i40e_pf *)data;
3239 	struct i40e_hw *hw = &pf->hw;
3240 	irqreturn_t ret = IRQ_NONE;
3241 	u32 icr0, icr0_remaining;
3242 	u32 val, ena_mask;
3243 
3244 	icr0 = rd32(hw, I40E_PFINT_ICR0);
3245 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3246 
3247 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
3248 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3249 		goto enable_intr;
3250 
3251 	/* if interrupt but no bits showing, must be SWINT */
3252 	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3253 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3254 		pf->sw_int_count++;
3255 
3256 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3257 	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3258 		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3259 		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3260 		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
3261 	}
3262 
3263 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3264 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3265 
3266 		/* temporarily disable queue cause for NAPI processing */
3267 		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3268 		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3269 		wr32(hw, I40E_QINT_RQCTL(0), qval);
3270 
3271 		qval = rd32(hw, I40E_QINT_TQCTL(0));
3272 		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3273 		wr32(hw, I40E_QINT_TQCTL(0), qval);
3274 
3275 		if (!test_bit(__I40E_DOWN, &pf->state))
3276 			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3277 	}
3278 
3279 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3280 		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3281 		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3282 	}
3283 
3284 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3285 		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3286 		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3287 	}
3288 
3289 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3290 		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3291 		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3292 	}
3293 
3294 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3295 		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3296 			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3297 		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3298 		val = rd32(hw, I40E_GLGEN_RSTAT);
3299 		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3300 		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3301 		if (val == I40E_RESET_CORER) {
3302 			pf->corer_count++;
3303 		} else if (val == I40E_RESET_GLOBR) {
3304 			pf->globr_count++;
3305 		} else if (val == I40E_RESET_EMPR) {
3306 			pf->empr_count++;
3307 			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3308 		}
3309 	}
3310 
3311 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3312 		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3313 		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3314 		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3315 			 rd32(hw, I40E_PFHMC_ERRORINFO),
3316 			 rd32(hw, I40E_PFHMC_ERRORDATA));
3317 	}
3318 
3319 	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3320 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3321 
3322 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3323 			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3324 			i40e_ptp_tx_hwtstamp(pf);
3325 		}
3326 	}
3327 
3328 	/* If a critical error is pending we have no choice but to reset the
3329 	 * device.
3330 	 * Report and mask out any remaining unexpected interrupts.
3331 	 */
3332 	icr0_remaining = icr0 & ena_mask;
3333 	if (icr0_remaining) {
3334 		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3335 			 icr0_remaining);
3336 		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3337 		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3338 		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3339 			dev_info(&pf->pdev->dev, "device will be reset\n");
3340 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3341 			i40e_service_event_schedule(pf);
3342 		}
3343 		ena_mask &= ~icr0_remaining;
3344 	}
3345 	ret = IRQ_HANDLED;
3346 
3347 enable_intr:
3348 	/* re-enable interrupt causes */
3349 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3350 	if (!test_bit(__I40E_DOWN, &pf->state)) {
3351 		i40e_service_event_schedule(pf);
3352 		i40e_irq_dynamic_enable_icr0(pf);
3353 	}
3354 
3355 	return ret;
3356 }
3357 
3358 /**
3359  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3360  * @tx_ring:  tx ring to clean
3361  * @budget:   how many cleans we're allowed
3362  *
3363  * Returns true if there's any budget left (i.e. the clean is finished)
3364  **/
3365 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3366 {
3367 	struct i40e_vsi *vsi = tx_ring->vsi;
3368 	u16 i = tx_ring->next_to_clean;
3369 	struct i40e_tx_buffer *tx_buf;
3370 	struct i40e_tx_desc *tx_desc;
3371 
3372 	tx_buf = &tx_ring->tx_bi[i];
3373 	tx_desc = I40E_TX_DESC(tx_ring, i);
3374 	i -= tx_ring->count;
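	/* i is biased by -count so the wrap check further down is simply (!i) */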
3375 
3376 	do {
3377 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3378 
3379 		/* if next_to_watch is not set then there is no work pending */
3380 		if (!eop_desc)
3381 			break;
3382 
3383 		/* prevent any other reads prior to eop_desc */
3384 		read_barrier_depends();
3385 
3386 		/* if the descriptor isn't done, no work yet to do */
3387 		if (!(eop_desc->cmd_type_offset_bsz &
3388 		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3389 			break;
3390 
3391 		/* clear next_to_watch to prevent false hangs */
3392 		tx_buf->next_to_watch = NULL;
3393 
3394 		tx_desc->buffer_addr = 0;
3395 		tx_desc->cmd_type_offset_bsz = 0;
3396 		/* move past filter desc */
3397 		tx_buf++;
3398 		tx_desc++;
3399 		i++;
3400 		if (unlikely(!i)) {
3401 			i -= tx_ring->count;
3402 			tx_buf = tx_ring->tx_bi;
3403 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3404 		}
3405 		/* unmap skb header data */
3406 		dma_unmap_single(tx_ring->dev,
3407 				 dma_unmap_addr(tx_buf, dma),
3408 				 dma_unmap_len(tx_buf, len),
3409 				 DMA_TO_DEVICE);
3410 		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3411 			kfree(tx_buf->raw_buf);
3412 
3413 		tx_buf->raw_buf = NULL;
3414 		tx_buf->tx_flags = 0;
3415 		tx_buf->next_to_watch = NULL;
3416 		dma_unmap_len_set(tx_buf, len, 0);
3417 		tx_desc->buffer_addr = 0;
3418 		tx_desc->cmd_type_offset_bsz = 0;
3419 
3420 		/* move us past the eop_desc for start of next FD desc */
3421 		tx_buf++;
3422 		tx_desc++;
3423 		i++;
3424 		if (unlikely(!i)) {
3425 			i -= tx_ring->count;
3426 			tx_buf = tx_ring->tx_bi;
3427 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3428 		}
3429 
3430 		/* update budget accounting */
3431 		budget--;
3432 	} while (likely(budget));
3433 
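	/* remove the -count offset to turn i back into a real ring index */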
3434 	i += tx_ring->count;
3435 	tx_ring->next_to_clean = i;
3436 
3437 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3438 		i40e_irq_dynamic_enable(vsi,
3439 				tx_ring->q_vector->v_idx + vsi->base_vector);
3440 	}
3441 	return budget > 0;
3442 }
3443 
3444 /**
3445  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3446  * @irq: interrupt number
3447  * @data: pointer to a q_vector
3448  **/
3449 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3450 {
3451 	struct i40e_q_vector *q_vector = data;
3452 	struct i40e_vsi *vsi;
3453 
3454 	if (!q_vector->tx.ring)
3455 		return IRQ_HANDLED;
3456 
3457 	vsi = q_vector->tx.ring->vsi;
3458 	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3459 
3460 	return IRQ_HANDLED;
3461 }
3462 
3463 /**
3464  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3465  * @vsi: the VSI being configured
3466  * @v_idx: vector index
3467  * @qp_idx: queue pair index
3468  **/
3469 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3470 {
3471 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3472 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3473 	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3474 
3475 	tx_ring->q_vector = q_vector;
3476 	tx_ring->next = q_vector->tx.ring;
3477 	q_vector->tx.ring = tx_ring;
3478 	q_vector->tx.count++;
3479 
3480 	rx_ring->q_vector = q_vector;
3481 	rx_ring->next = q_vector->rx.ring;
3482 	q_vector->rx.ring = rx_ring;
3483 	q_vector->rx.count++;
3484 }
3485 
3486 /**
3487  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3488  * @vsi: the VSI being configured
3489  *
3490  * This function maps descriptor rings to the queue-specific vectors
3491  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3492  * one vector per queue pair, but on a constrained vector budget, we
3493  * group the queue pairs as "efficiently" as possible.
3494  **/
3495 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3496 {
3497 	int qp_remaining = vsi->num_queue_pairs;
3498 	int q_vectors = vsi->num_q_vectors;
3499 	int num_ringpairs;
3500 	int v_start = 0;
3501 	int qp_idx = 0;
3502 
3503 	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3504 	 * group them so there are multiple queues per vector.
3505 	 * It is also important to go through all the vectors available so
3506 	 * that, if we don't use them all, the remaining vectors are cleared.
3507 	 * This is especially important when decreasing the number of queues
3508 	 * in use.
3509 	 */
3510 	for (; v_start < q_vectors; v_start++) {
3511 		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3512 
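		/* spread the remaining queue pairs as evenly as possible
		 * across the vectors that are still unassigned
		 */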
3513 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3514 
3515 		q_vector->num_ringpairs = num_ringpairs;
3516 
3517 		q_vector->rx.count = 0;
3518 		q_vector->tx.count = 0;
3519 		q_vector->rx.ring = NULL;
3520 		q_vector->tx.ring = NULL;
3521 
3522 		while (num_ringpairs--) {
3523 			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3524 			qp_idx++;
3525 			qp_remaining--;
3526 		}
3527 	}
3528 }
3529 
3530 /**
3531  * i40e_vsi_request_irq - Request IRQ from the OS
3532  * @vsi: the VSI being configured
3533  * @basename: name for the vector
3534  **/
3535 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3536 {
3537 	struct i40e_pf *pf = vsi->back;
3538 	int err;
3539 
3540 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3541 		err = i40e_vsi_request_irq_msix(vsi, basename);
3542 	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3543 		err = request_irq(pf->pdev->irq, i40e_intr, 0,
3544 				  pf->int_name, pf);
3545 	else
3546 		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3547 				  pf->int_name, pf);
3548 
3549 	if (err)
3550 		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3551 
3552 	return err;
3553 }
3554 
3555 #ifdef CONFIG_NET_POLL_CONTROLLER
3556 /**
3557  * i40e_netpoll - A Polling 'interrupt' handler
3558  * @netdev: network interface device structure
3559  *
3560  * This is used by netconsole to send skbs without having to re-enable
3561  * interrupts.  It's not called while the normal interrupt routine is executing.
3562  **/
3563 #ifdef I40E_FCOE
3564 void i40e_netpoll(struct net_device *netdev)
3565 #else
3566 static void i40e_netpoll(struct net_device *netdev)
3567 #endif
3568 {
3569 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3570 	struct i40e_vsi *vsi = np->vsi;
3571 	struct i40e_pf *pf = vsi->back;
3572 	int i;
3573 
3574 	/* if interface is down do nothing */
3575 	if (test_bit(__I40E_DOWN, &vsi->state))
3576 		return;
3577 
3578 	pf->flags |= I40E_FLAG_IN_NETPOLL;
3579 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3580 		for (i = 0; i < vsi->num_q_vectors; i++)
3581 			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3582 	} else {
3583 		i40e_intr(pf->pdev->irq, netdev);
3584 	}
3585 	pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3586 }
3587 #endif
3588 
3589 /**
3590  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3591  * @pf: the PF being configured
3592  * @pf_q: the PF queue
3593  * @enable: enable or disable state of the queue
3594  *
3595  * This routine will wait for the given Tx queue of the PF to reach the
3596  * enabled or disabled state.
3597  * Returns -ETIMEDOUT if the queue fails to reach the requested state after
3598  * multiple retries; otherwise returns 0 on success.
3599  **/
3600 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3601 {
3602 	int i;
3603 	u32 tx_reg;
3604 
3605 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3606 		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3607 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3608 			break;
3609 
3610 		usleep_range(10, 20);
3611 	}
3612 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3613 		return -ETIMEDOUT;
3614 
3615 	return 0;
3616 }
3617 
3618 /**
3619  * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
3620  * @vsi: the VSI being configured
3621  * @enable: start or stop the rings
3622  **/
3623 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3624 {
3625 	struct i40e_pf *pf = vsi->back;
3626 	struct i40e_hw *hw = &pf->hw;
3627 	int i, j, pf_q, ret = 0;
3628 	u32 tx_reg;
3629 
3630 	pf_q = vsi->base_queue;
3631 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3632 
3633 		/* warn the TX unit of coming changes */
3634 		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3635 		if (!enable)
3636 			usleep_range(10, 20);
3637 
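		/* wait for any previous request to be acted on; the queue has
		 * settled once the QENA_REQ and QENA_STAT bits agree
		 */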
3638 		for (j = 0; j < 50; j++) {
3639 			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3640 			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3641 			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3642 				break;
3643 			usleep_range(1000, 2000);
3644 		}
3645 		/* Skip if the queue is already in the requested state */
3646 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3647 			continue;
3648 
3649 		/* turn on/off the queue */
3650 		if (enable) {
3651 			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3652 			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3653 		} else {
3654 			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3655 		}
3656 
3657 		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3658 		/* Don't wait for a disable to finish if port Tx is suspended */
3659 		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3660 			continue;
3661 
3662 		/* wait for the change to finish */
3663 		ret = i40e_pf_txq_wait(pf, pf_q, enable);
3664 		if (ret) {
3665 			dev_info(&pf->pdev->dev,
3666 				 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3667 				 __func__, vsi->seid, pf_q,
3668 				 (enable ? "en" : "dis"));
3669 			break;
3670 		}
3671 	}
3672 
3673 	if (hw->revision_id == 0)
3674 		mdelay(50);
3675 	return ret;
3676 }
3677 
3678 /**
3679  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3680  * @pf: the PF being configured
3681  * @pf_q: the PF queue
3682  * @enable: enable or disable state of the queue
3683  *
3684  * This routine will wait for the given Rx queue of the PF to reach the
3685  * enabled or disabled state.
3686  * Returns -ETIMEDOUT if the queue fails to reach the requested state after
3687  * multiple retries; otherwise returns 0 on success.
3688  **/
3689 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3690 {
3691 	int i;
3692 	u32 rx_reg;
3693 
3694 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3695 		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3696 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3697 			break;
3698 
3699 		usleep_range(10, 20);
3700 	}
3701 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3702 		return -ETIMEDOUT;
3703 
3704 	return 0;
3705 }
3706 
3707 /**
3708  * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3709  * @vsi: the VSI being configured
3710  * @enable: start or stop the rings
3711  **/
3712 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3713 {
3714 	struct i40e_pf *pf = vsi->back;
3715 	struct i40e_hw *hw = &pf->hw;
3716 	int i, j, pf_q, ret = 0;
3717 	u32 rx_reg;
3718 
3719 	pf_q = vsi->base_queue;
3720 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
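		/* wait for any pending enable/disable request to settle
		 * (QENA_REQ and QENA_STAT bits agree) before touching the queue
		 */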
3721 		for (j = 0; j < 50; j++) {
3722 			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3723 			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3724 			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3725 				break;
3726 			usleep_range(1000, 2000);
3727 		}
3728 
3729 		/* Skip if the queue is already in the requested state */
3730 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3731 			continue;
3732 
3733 		/* turn on/off the queue */
3734 		if (enable)
3735 			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3736 		else
3737 			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3738 		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3739 
3740 		/* wait for the change to finish */
3741 		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3742 		if (ret) {
3743 			dev_info(&pf->pdev->dev,
3744 				 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3745 				 __func__, vsi->seid, pf_q,
3746 				 (enable ? "en" : "dis"));
3747 			break;
3748 		}
3749 	}
3750 
3751 	return ret;
3752 }
3753 
3754 /**
3755  * i40e_vsi_control_rings - Start or stop a VSI's rings
3756  * @vsi: the VSI being configured
3757  * @request: start or stop the rings
3758  **/
3759 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3760 {
3761 	int ret = 0;
3762 
3763 	/* do rx first for enable and last for disable */
3764 	if (request) {
3765 		ret = i40e_vsi_control_rx(vsi, request);
3766 		if (ret)
3767 			return ret;
3768 		ret = i40e_vsi_control_tx(vsi, request);
3769 	} else {
3770 		/* Ignore return value, we need to shutdown whatever we can */
3771 		i40e_vsi_control_tx(vsi, request);
3772 		i40e_vsi_control_rx(vsi, request);
3773 	}
3774 
3775 	return ret;
3776 }
3777 
3778 /**
3779  * i40e_vsi_free_irq - Free the irq association with the OS
3780  * @vsi: the VSI being configured
3781  **/
3782 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3783 {
3784 	struct i40e_pf *pf = vsi->back;
3785 	struct i40e_hw *hw = &pf->hw;
3786 	int base = vsi->base_vector;
3787 	u32 val, qp;
3788 	int i;
3789 
3790 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3791 		if (!vsi->q_vectors)
3792 			return;
3793 
3794 		if (!vsi->irqs_ready)
3795 			return;
3796 
3797 		vsi->irqs_ready = false;
3798 		for (i = 0; i < vsi->num_q_vectors; i++) {
3799 			u16 vector = i + base;
3800 
3801 			/* free only the irqs that were actually requested */
3802 			if (!vsi->q_vectors[i] ||
3803 			    !vsi->q_vectors[i]->num_ringpairs)
3804 				continue;
3805 
3806 			/* clear the affinity_mask in the IRQ descriptor */
3807 			irq_set_affinity_hint(pf->msix_entries[vector].vector,
3808 					      NULL);
3809 			free_irq(pf->msix_entries[vector].vector,
3810 				 vsi->q_vectors[i]);
3811 
3812 			/* Tear down the interrupt queue link list
3813 			 *
3814 			 * We know that they come in pairs and always
3815 			 * the Rx first, then the Tx.  To clear the
3816 			 * link list, stick the EOL value into the
3817 			 * next_q field of the registers.
3818 			 */
3819 			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3820 			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3821 				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3822 			val |= I40E_QUEUE_END_OF_LIST
3823 				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3824 			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3825 
3826 			while (qp != I40E_QUEUE_END_OF_LIST) {
3827 				u32 next;
3828 
3829 				val = rd32(hw, I40E_QINT_RQCTL(qp));
3830 
3831 				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3832 					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3833 					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3834 					 I40E_QINT_RQCTL_INTEVENT_MASK);
3835 
3836 				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3837 					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3838 
3839 				wr32(hw, I40E_QINT_RQCTL(qp), val);
3840 
3841 				val = rd32(hw, I40E_QINT_TQCTL(qp));
3842 
3843 				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3844 					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3845 
3846 				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3847 					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3848 					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3849 					 I40E_QINT_TQCTL_INTEVENT_MASK);
3850 
3851 				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3852 					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3853 
3854 				wr32(hw, I40E_QINT_TQCTL(qp), val);
3855 				qp = next;
3856 			}
3857 		}
3858 	} else {
3859 		free_irq(pf->pdev->irq, pf);
3860 
3861 		val = rd32(hw, I40E_PFINT_LNKLST0);
3862 		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3863 			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3864 		val |= I40E_QUEUE_END_OF_LIST
3865 			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3866 		wr32(hw, I40E_PFINT_LNKLST0, val);
3867 
3868 		val = rd32(hw, I40E_QINT_RQCTL(qp));
3869 		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3870 			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3871 			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3872 			 I40E_QINT_RQCTL_INTEVENT_MASK);
3873 
3874 		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3875 			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3876 
3877 		wr32(hw, I40E_QINT_RQCTL(qp), val);
3878 
3879 		val = rd32(hw, I40E_QINT_TQCTL(qp));
3880 
3881 		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3882 			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3883 			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3884 			 I40E_QINT_TQCTL_INTEVENT_MASK);
3885 
3886 		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3887 			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3888 
3889 		wr32(hw, I40E_QINT_TQCTL(qp), val);
3890 	}
3891 }
3892 
3893 /**
3894  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3895  * @vsi: the VSI being configured
3896  * @v_idx: Index of vector to be freed
3897  *
3898  * This function frees the memory allocated to the q_vector.  In addition if
3899  * NAPI is enabled it will delete any references to the NAPI struct prior
3900  * to freeing the q_vector.
3901  **/
3902 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3903 {
3904 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3905 	struct i40e_ring *ring;
3906 
3907 	if (!q_vector)
3908 		return;
3909 
3910 	/* disassociate q_vector from rings */
3911 	i40e_for_each_ring(ring, q_vector->tx)
3912 		ring->q_vector = NULL;
3913 
3914 	i40e_for_each_ring(ring, q_vector->rx)
3915 		ring->q_vector = NULL;
3916 
3917 	/* only VSI w/ an associated netdev is set up w/ NAPI */
3918 	if (vsi->netdev)
3919 		netif_napi_del(&q_vector->napi);
3920 
3921 	vsi->q_vectors[v_idx] = NULL;
3922 
3923 	kfree_rcu(q_vector, rcu);
3924 }
3925 
3926 /**
3927  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3928  * @vsi: the VSI being un-configured
3929  *
3930  * This frees the memory allocated to the q_vectors and
3931  * deletes references to the NAPI struct.
3932  **/
3933 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3934 {
3935 	int v_idx;
3936 
3937 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3938 		i40e_free_q_vector(vsi, v_idx);
3939 }
3940 
3941 /**
3942  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3943  * @pf: board private structure
3944  **/
3945 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3946 {
3947 	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3948 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3949 		pci_disable_msix(pf->pdev);
3950 		kfree(pf->msix_entries);
3951 		pf->msix_entries = NULL;
3952 		kfree(pf->irq_pile);
3953 		pf->irq_pile = NULL;
3954 	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3955 		pci_disable_msi(pf->pdev);
3956 	}
3957 	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3958 }
3959 
3960 /**
3961  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3962  * @pf: board private structure
3963  *
3964  * We go through and clear interrupt specific resources and reset the structure
3965  * to pre-load conditions
3966  **/
3967 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3968 {
3969 	int i;
3970 
3971 	i40e_stop_misc_vector(pf);
3972 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3973 		synchronize_irq(pf->msix_entries[0].vector);
3974 		free_irq(pf->msix_entries[0].vector, pf);
3975 	}
3976 
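	/* hand the interrupt vector reservation back to the tracking pile */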
3977 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3978 	for (i = 0; i < pf->num_alloc_vsi; i++)
3979 		if (pf->vsi[i])
3980 			i40e_vsi_free_q_vectors(pf->vsi[i]);
3981 	i40e_reset_interrupt_capability(pf);
3982 }
3983 
3984 /**
3985  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3986  * @vsi: the VSI being configured
3987  **/
3988 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3989 {
3990 	int q_idx;
3991 
3992 	if (!vsi->netdev)
3993 		return;
3994 
3995 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3996 		napi_enable(&vsi->q_vectors[q_idx]->napi);
3997 }
3998 
3999 /**
4000  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4001  * @vsi: the VSI being configured
4002  **/
4003 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4004 {
4005 	int q_idx;
4006 
4007 	if (!vsi->netdev)
4008 		return;
4009 
4010 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4011 		napi_disable(&vsi->q_vectors[q_idx]->napi);
4012 }
4013 
4014 /**
4015  * i40e_vsi_close - Shut down a VSI
4016  * @vsi: the vsi to be quelled
4017  **/
4018 static void i40e_vsi_close(struct i40e_vsi *vsi)
4019 {
4020 	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4021 		i40e_down(vsi);
4022 	i40e_vsi_free_irq(vsi);
4023 	i40e_vsi_free_tx_resources(vsi);
4024 	i40e_vsi_free_rx_resources(vsi);
4025 	vsi->current_netdev_flags = 0;
4026 }
4027 
4028 /**
4029  * i40e_quiesce_vsi - Pause a given VSI
4030  * @vsi: the VSI being paused
4031  **/
4032 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4033 {
4034 	if (test_bit(__I40E_DOWN, &vsi->state))
4035 		return;
4036 
4037 	/* No need to disable FCoE VSI when Tx suspended */
4038 	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4039 	    vsi->type == I40E_VSI_FCOE) {
4040 		dev_dbg(&vsi->back->pdev->dev,
4041 			"%s: VSI seid %d skipping FCoE VSI disable\n",
4042 			 __func__, vsi->seid);
4043 		return;
4044 	}
4045 
4046 	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4047 	if (vsi->netdev && netif_running(vsi->netdev)) {
4048 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4049 	} else {
4050 		i40e_vsi_close(vsi);
4051 	}
4052 }
4053 
4054 /**
4055  * i40e_unquiesce_vsi - Resume a given VSI
4056  * @vsi: the VSI being resumed
4057  **/
4058 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4059 {
4060 	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4061 		return;
4062 
4063 	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4064 	if (vsi->netdev && netif_running(vsi->netdev))
4065 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4066 	else
4067 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
4068 }
4069 
4070 /**
4071  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4072  * @pf: the PF
4073  **/
4074 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4075 {
4076 	int v;
4077 
4078 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4079 		if (pf->vsi[v])
4080 			i40e_quiesce_vsi(pf->vsi[v]);
4081 	}
4082 }
4083 
4084 /**
4085  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4086  * @pf: the PF
4087  **/
4088 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4089 {
4090 	int v;
4091 
4092 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4093 		if (pf->vsi[v])
4094 			i40e_unquiesce_vsi(pf->vsi[v]);
4095 	}
4096 }
4097 
4098 #ifdef CONFIG_I40E_DCB
4099 /**
4100  * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
4101  * @vsi: the VSI being configured
4102  *
4103  * This function waits for the given VSI's Tx queues to be disabled.
4104  **/
4105 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4106 {
4107 	struct i40e_pf *pf = vsi->back;
4108 	int i, pf_q, ret;
4109 
4110 	pf_q = vsi->base_queue;
4111 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4112 		/* Check and wait for the disable status of the queue */
4113 		ret = i40e_pf_txq_wait(pf, pf_q, false);
4114 		if (ret) {
4115 			dev_info(&pf->pdev->dev,
4116 				 "%s: VSI seid %d Tx ring %d disable timeout\n",
4117 				 __func__, vsi->seid, pf_q);
4118 			return ret;
4119 		}
4120 	}
4121 
4122 	return 0;
4123 }
4124 
4125 /**
4126  * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4127  * @pf: the PF
4128  *
4129  * This function waits for the Tx queues of all the VSIs managed by this PF
4130  * to reach the disabled state.
4131  **/
4132 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4133 {
4134 	int v, ret = 0;
4135 
4136 	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4137 		/* No need to wait for FCoE VSI queues */
4138 		if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4139 			ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4140 			if (ret)
4141 				break;
4142 		}
4143 	}
4144 
4145 	return ret;
4146 }
4147 
4148 #endif
4149 /**
4150  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4151  * @pf: pointer to PF
4152  *
4153  * Get the TC map for an iSCSI-enabled PF; the map includes the iSCSI TC
4154  * as well as the LAN TC.
4155  **/
4156 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4157 {
4158 	struct i40e_dcb_app_priority_table app;
4159 	struct i40e_hw *hw = &pf->hw;
4160 	u8 enabled_tc = 1; /* TC0 is always enabled */
4161 	u8 tc, i;
4162 	/* Get the iSCSI APP TLV */
4163 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4164 
4165 	for (i = 0; i < dcbcfg->numapps; i++) {
4166 		app = dcbcfg->app[i];
4167 		if (app.selector == I40E_APP_SEL_TCPIP &&
4168 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
4169 			tc = dcbcfg->etscfg.prioritytable[app.priority];
4170 			enabled_tc |= BIT_ULL(tc);
4171 			break;
4172 		}
4173 	}
4174 
4175 	return enabled_tc;
4176 }
4177 
4178 /**
4179  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4180  * @dcbcfg: the corresponding DCBx configuration structure
4181  *
4182  * Return the number of TCs from given DCBx configuration
4183  **/
4184 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4185 {
4186 	u8 num_tc = 0;
4187 	int i;
4188 
4189 	/* Scan the ETS Config Priority Table to find
4190 	 * traffic class enabled for a given priority
4191 	 * and use the traffic class index to get the
4192 	 * number of traffic classes enabled
4193 	 */
4194 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4195 		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4196 			num_tc = dcbcfg->etscfg.prioritytable[i];
4197 	}
4198 
4199 	/* Traffic class index starts from zero so
4200 	 * increment to return the actual count
4201 	 */
4202 	return num_tc + 1;
4203 }
4204 
4205 /**
4206  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4207  * @dcbcfg: the corresponding DCBx configuration structure
4208  *
4209  * Parse the given DCBx configuration and return a bitmap of the
4210  * traffic classes it enables
4211  **/
4212 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4213 {
4214 	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4215 	u8 enabled_tc = 1;
4216 	u8 i;
4217 
4218 	for (i = 0; i < num_tc; i++)
4219 		enabled_tc |= BIT(i);
4220 
4221 	return enabled_tc;
4222 }
4223 
4224 /**
4225  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4226  * @pf: PF being queried
4227  *
4228  * Return number of traffic classes enabled for the given PF
4229  **/
4230 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4231 {
4232 	struct i40e_hw *hw = &pf->hw;
4233 	u8 i, enabled_tc;
4234 	u8 num_tc = 0;
4235 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4236 
4237 	/* If DCB is not enabled then always in single TC */
4238 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4239 		return 1;
4240 
4241 	/* SFP mode will be enabled for all TCs on port */
4242 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4243 		return i40e_dcb_get_num_tc(dcbcfg);
4244 
4245 	/* MFP mode return count of enabled TCs for this PF */
4246 	if (pf->hw.func_caps.iscsi)
4247 		enabled_tc =  i40e_get_iscsi_tc_map(pf);
4248 	else
4249 		return 1; /* Only TC0 */
4250 
4251 	/* At least have TC0 */
4252 	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4253 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4254 		if (enabled_tc & BIT_ULL(i))
4255 			num_tc++;
4256 	}
4257 	return num_tc;
4258 }
4259 
4260 /**
4261  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4262  * @pf: PF being queried
4263  *
4264  * Return a bitmap for first enabled traffic class for this PF.
4265  **/
4266 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4267 {
4268 	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4269 	u8 i = 0;
4270 
4271 	if (!enabled_tc)
4272 		return 0x1; /* TC0 */
4273 
4274 	/* Find the first enabled TC */
4275 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4276 		if (enabled_tc & BIT_ULL(i))
4277 			break;
4278 	}
4279 
4280 	return BIT(i);
4281 }
4282 
4283 /**
4284  * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4285  * @pf: PF being queried
4286  *
4287  * Return a bitmap for enabled traffic classes for this PF.
4288  **/
4289 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4290 {
4291 	/* If DCB is not enabled for this PF then just return default TC */
4292 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4293 		return i40e_pf_get_default_tc(pf);
4294 
4295 	/* SFP mode we want PF to be enabled for all TCs */
4296 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4297 		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4298 
4299 	/* MFP enabled and iSCSI PF type */
4300 	if (pf->hw.func_caps.iscsi)
4301 		return i40e_get_iscsi_tc_map(pf);
4302 	else
4303 		return i40e_pf_get_default_tc(pf);
4304 }
4305 
4306 /**
4307  * i40e_vsi_get_bw_info - Query VSI BW Information
4308  * @vsi: the VSI being queried
4309  *
4310  * Returns 0 on success, negative value on failure
4311  **/
4312 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4313 {
4314 	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4315 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4316 	struct i40e_pf *pf = vsi->back;
4317 	struct i40e_hw *hw = &pf->hw;
4318 	i40e_status ret;
4319 	u32 tc_bw_max;
4320 	int i;
4321 
4322 	/* Get the VSI level BW configuration */
4323 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4324 	if (ret) {
4325 		dev_info(&pf->pdev->dev,
4326 			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4327 			 i40e_stat_str(&pf->hw, ret),
4328 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4329 		return -EINVAL;
4330 	}
4331 
4332 	/* Get the VSI level BW configuration per TC */
4333 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4334 					       NULL);
4335 	if (ret) {
4336 		dev_info(&pf->pdev->dev,
4337 			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4338 			 i40e_stat_str(&pf->hw, ret),
4339 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4340 		return -EINVAL;
4341 	}
4342 
4343 	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4344 		dev_info(&pf->pdev->dev,
4345 			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4346 			 bw_config.tc_valid_bits,
4347 			 bw_ets_config.tc_valid_bits);
4348 		/* Still continuing */
4349 	}
4350 
4351 	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4352 	vsi->bw_max_quanta = bw_config.max_bw;
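	/* tc_bw_max is reported as two 16-bit words; stitch them into a
	 * single 32-bit value carrying 4 bits of max quanta per TC
	 */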
4353 	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4354 		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4355 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4356 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4357 		vsi->bw_ets_limit_credits[i] =
4358 					le16_to_cpu(bw_ets_config.credits[i]);
4359 		/* 3 bits out of 4 for each TC */
4360 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4361 	}
4362 
4363 	return 0;
4364 }
4365 
4366 /**
4367  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4368  * @vsi: the VSI being configured
4369  * @enabled_tc: TC bitmap
4370  * @bw_share: BW shared credits per TC
4371  *
4372  * Returns 0 on success, negative value on failure
4373  **/
4374 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4375 				       u8 *bw_share)
4376 {
4377 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4378 	i40e_status ret;
4379 	int i;
4380 
4381 	bw_data.tc_valid_bits = enabled_tc;
4382 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4383 		bw_data.tc_bw_credits[i] = bw_share[i];
4384 
4385 	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4386 				       NULL);
4387 	if (ret) {
4388 		dev_info(&vsi->back->pdev->dev,
4389 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
4390 			 vsi->back->hw.aq.asq_last_status);
4391 		return -EINVAL;
4392 	}
4393 
4394 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4395 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4396 
4397 	return 0;
4398 }
4399 
4400 /**
4401  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4402  * @vsi: the VSI being configured
4403  * @enabled_tc: TC map to be enabled
4404  *
4405  **/
4406 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4407 {
4408 	struct net_device *netdev = vsi->netdev;
4409 	struct i40e_pf *pf = vsi->back;
4410 	struct i40e_hw *hw = &pf->hw;
4411 	u8 netdev_tc = 0;
4412 	int i;
4413 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4414 
4415 	if (!netdev)
4416 		return;
4417 
4418 	if (!enabled_tc) {
4419 		netdev_reset_tc(netdev);
4420 		return;
4421 	}
4422 
4423 	/* Set up actual enabled TCs on the VSI */
4424 	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4425 		return;
4426 
4427 	/* set per TC queues for the VSI */
4428 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4429 		/* Only set TC queues for enabled tcs
4430 		 *
4431 		 * e.g. For a VSI that has TC0 and TC3 enabled the
4432 		 * enabled_tc bitmap would be 0x09; the driver will set
4433 		 * numtc for the netdev to 2, which the netdev layer
4434 		 * will reference as TC 0 and TC 1.
4435 		 */
4436 		if (vsi->tc_config.enabled_tc & BIT_ULL(i))
4437 			netdev_set_tc_queue(netdev,
4438 					vsi->tc_config.tc_info[i].netdev_tc,
4439 					vsi->tc_config.tc_info[i].qcount,
4440 					vsi->tc_config.tc_info[i].qoffset);
4441 	}
4442 
4443 	/* Assign UP2TC map for the VSI */
4444 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4445 		/* Get the actual TC# for the UP */
4446 		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4447 		/* Get the mapped netdev TC# for the UP */
4448 		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
4449 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
4450 	}
4451 }
4452 
4453 /**
4454  * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4455  * @vsi: the VSI being configured
4456  * @ctxt: the ctxt buffer returned from AQ VSI update param command
4457  **/
4458 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4459 				      struct i40e_vsi_context *ctxt)
4460 {
4461 	/* copy just the sections touched, not the entire info,
4462 	 * since not all sections are valid as returned by
4463 	 * update vsi params
4464 	 */
4465 	vsi->info.mapping_flags = ctxt->info.mapping_flags;
4466 	memcpy(&vsi->info.queue_mapping,
4467 	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4468 	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4469 	       sizeof(vsi->info.tc_mapping));
4470 }
4471 
4472 /**
4473  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4474  * @vsi: VSI to be configured
4475  * @enabled_tc: TC bitmap
4476  *
4477  * This configures a particular VSI for TCs that are mapped to the
4478  * given TC bitmap. It uses default bandwidth share for TCs across
4479  * VSIs to configure TC for a particular VSI.
4480  *
4481  * NOTE:
4482  * It is expected that the VSI queues have been quiesced before calling
4483  * this function.
4484  **/
4485 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4486 {
4487 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4488 	struct i40e_vsi_context ctxt;
4489 	int ret = 0;
4490 	int i;
4491 
4492 	/* Check if enabled_tc is same as existing or new TCs */
4493 	if (vsi->tc_config.enabled_tc == enabled_tc)
4494 		return ret;
4495 
4496 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
4497 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4498 		if (enabled_tc & BIT_ULL(i))
4499 			bw_share[i] = 1;
4500 	}
4501 
4502 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4503 	if (ret) {
4504 		dev_info(&vsi->back->pdev->dev,
4505 			 "Failed configuring TC map %d for VSI %d\n",
4506 			 enabled_tc, vsi->seid);
4507 		goto out;
4508 	}
4509 
4510 	/* Update Queue Pairs Mapping for currently enabled UPs */
4511 	ctxt.seid = vsi->seid;
4512 	ctxt.pf_num = vsi->back->hw.pf_id;
4513 	ctxt.vf_num = 0;
4514 	ctxt.uplink_seid = vsi->uplink_seid;
4515 	ctxt.info = vsi->info;
4516 	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4517 
4518 	/* Update the VSI after updating the VSI queue-mapping information */
4519 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4520 	if (ret) {
4521 		dev_info(&vsi->back->pdev->dev,
4522 			 "Update vsi tc config failed, err %s aq_err %s\n",
4523 			 i40e_stat_str(&vsi->back->hw, ret),
4524 			 i40e_aq_str(&vsi->back->hw,
4525 				     vsi->back->hw.aq.asq_last_status));
4526 		goto out;
4527 	}
4528 	/* update the local VSI info with updated queue map */
4529 	i40e_vsi_update_queue_map(vsi, &ctxt);
4530 	vsi->info.valid_sections = 0;
4531 
4532 	/* Update current VSI BW information */
4533 	ret = i40e_vsi_get_bw_info(vsi);
4534 	if (ret) {
4535 		dev_info(&vsi->back->pdev->dev,
4536 			 "Failed updating vsi bw info, err %s aq_err %s\n",
4537 			 i40e_stat_str(&vsi->back->hw, ret),
4538 			 i40e_aq_str(&vsi->back->hw,
4539 				     vsi->back->hw.aq.asq_last_status));
4540 		goto out;
4541 	}
4542 
4543 	/* Update the netdev TC setup */
4544 	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4545 out:
4546 	return ret;
4547 }
4548 
4549 /**
4550  * i40e_veb_config_tc - Configure TCs for given VEB
4551  * @veb: given VEB
4552  * @enabled_tc: TC bitmap
4553  *
4554  * Configures given TC bitmap for VEB (switching) element
4555  **/
4556 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4557 {
4558 	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4559 	struct i40e_pf *pf = veb->pf;
4560 	int ret = 0;
4561 	int i;
4562 
4563 	/* No TCs or already enabled TCs just return */
4564 	if (!enabled_tc || veb->enabled_tc == enabled_tc)
4565 		return ret;
4566 
4567 	bw_data.tc_valid_bits = enabled_tc;
4568 	/* bw_data.absolute_credits is not set (relative) */
4569 
4570 	/* Enable ETS TCs with equal BW Share for now */
4571 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4572 		if (enabled_tc & BIT_ULL(i))
4573 			bw_data.tc_bw_share_credits[i] = 1;
4574 	}
4575 
4576 	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4577 						   &bw_data, NULL);
4578 	if (ret) {
4579 		dev_info(&pf->pdev->dev,
4580 			 "VEB bw config failed, err %s aq_err %s\n",
4581 			 i40e_stat_str(&pf->hw, ret),
4582 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4583 		goto out;
4584 	}
4585 
4586 	/* Update the BW information */
4587 	ret = i40e_veb_get_bw_info(veb);
4588 	if (ret) {
4589 		dev_info(&pf->pdev->dev,
4590 			 "Failed getting veb bw config, err %s aq_err %s\n",
4591 			 i40e_stat_str(&pf->hw, ret),
4592 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4593 	}
4594 
4595 out:
4596 	return ret;
4597 }
4598 
4599 #ifdef CONFIG_I40E_DCB
4600 /**
4601  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4602  * @pf: PF struct
4603  *
4604  * Reconfigure VEB/VSIs on a given PF; it is assumed that
4605  * the caller has already quiesced all the VSIs before calling
4606  * this function
4607  **/
4608 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4609 {
4610 	u8 tc_map = 0;
4611 	int ret;
4612 	u8 v;
4613 
4614 	/* Enable the TCs available on PF to all VEBs */
4615 	tc_map = i40e_pf_get_tc_map(pf);
4616 	for (v = 0; v < I40E_MAX_VEB; v++) {
4617 		if (!pf->veb[v])
4618 			continue;
4619 		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4620 		if (ret) {
4621 			dev_info(&pf->pdev->dev,
4622 				 "Failed configuring TC for VEB seid=%d\n",
4623 				 pf->veb[v]->seid);
4624 			/* Will try to configure as many components as possible */
4625 		}
4626 	}
4627 
4628 	/* Update each VSI */
4629 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4630 		if (!pf->vsi[v])
4631 			continue;
4632 
4633 		/* - Enable all TCs for the LAN VSI
4634 #ifdef I40E_FCOE
4635 		 * - For FCoE VSI only enable the TC configured
4636 		 *   as per the APP TLV
4637 #endif
4638 		 * - For all others keep them at TC0 for now
4639 		 */
4640 		if (v == pf->lan_vsi)
4641 			tc_map = i40e_pf_get_tc_map(pf);
4642 		else
4643 			tc_map = i40e_pf_get_default_tc(pf);
4644 #ifdef I40E_FCOE
4645 		if (pf->vsi[v]->type == I40E_VSI_FCOE)
4646 			tc_map = i40e_get_fcoe_tc_map(pf);
4647 #endif /* #ifdef I40E_FCOE */
4648 
4649 		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4650 		if (ret) {
4651 			dev_info(&pf->pdev->dev,
4652 				 "Failed configuring TC for VSI seid=%d\n",
4653 				 pf->vsi[v]->seid);
4654 			/* Will try to configure as many components as possible */
4655 		} else {
4656 			/* Re-configure VSI vectors based on updated TC map */
4657 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4658 			if (pf->vsi[v]->netdev)
4659 				i40e_dcbnl_set_all(pf->vsi[v]);
4660 		}
4661 	}
4662 }
4663 
4664 /**
4665  * i40e_resume_port_tx - Resume port Tx
4666  * @pf: PF struct
4667  *
4668  * Resume a port's Tx and issue a PF reset in case of failure to
4669  * resume.
4670  **/
4671 static int i40e_resume_port_tx(struct i40e_pf *pf)
4672 {
4673 	struct i40e_hw *hw = &pf->hw;
4674 	int ret;
4675 
4676 	ret = i40e_aq_resume_port_tx(hw, NULL);
4677 	if (ret) {
4678 		dev_info(&pf->pdev->dev,
4679 			 "Resume Port Tx failed, err %s aq_err %s\n",
4680 			  i40e_stat_str(&pf->hw, ret),
4681 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4682 		/* Schedule PF reset to recover */
4683 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4684 		i40e_service_event_schedule(pf);
4685 	}
4686 
4687 	return ret;
4688 }
4689 
4690 /**
4691  * i40e_init_pf_dcb - Initialize DCB configuration
4692  * @pf: PF being configured
4693  *
4694  * Query the current DCB configuration and cache it
4695  * in the hardware structure
4696  **/
4697 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4698 {
4699 	struct i40e_hw *hw = &pf->hw;
4700 	int err = 0;
4701 
4702 	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
4703 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
4704 	    (pf->hw.aq.fw_maj_ver < 4))
4705 		goto out;
4706 
4707 	/* Get the initial DCB configuration */
4708 	err = i40e_init_dcb(hw);
4709 	if (!err) {
4710 		/* Device/Function is not DCBX capable */
4711 		if ((!hw->func_caps.dcb) ||
4712 		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4713 			dev_info(&pf->pdev->dev,
4714 				 "DCBX offload is not supported or is disabled for this PF.\n");
4715 
4716 			if (pf->flags & I40E_FLAG_MFP_ENABLED)
4717 				goto out;
4718 
4719 		} else {
4720 			/* When status is not DISABLED, DCBX is managed by the FW */
4721 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4722 				       DCB_CAP_DCBX_VER_IEEE;
4723 
4724 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
4725 			/* Enable DCB tagging only when more than one TC */
4726 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4727 				pf->flags |= I40E_FLAG_DCB_ENABLED;
4728 			dev_dbg(&pf->pdev->dev,
4729 				"DCBX offload is supported for this PF.\n");
4730 		}
4731 	} else {
4732 		dev_info(&pf->pdev->dev,
4733 			 "Query for DCB configuration failed, err %s aq_err %s\n",
4734 			 i40e_stat_str(&pf->hw, err),
4735 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4736 	}
4737 
4738 out:
4739 	return err;
4740 }
4741 #endif /* CONFIG_I40E_DCB */
4742 #define SPEED_SIZE 14
4743 #define FC_SIZE 8
4744 /**
4745  * i40e_print_link_message - print link up or down
4746  * @vsi: the VSI for which link needs a message
4747  * @isup: true if the link is up, false otherwise
 **/
4748 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4749 {
4750 	char speed[SPEED_SIZE] = "Unknown";
4751 	char fc[FC_SIZE] = "RX/TX";
4752 
4753 	if (!isup) {
4754 		netdev_info(vsi->netdev, "NIC Link is Down\n");
4755 		return;
4756 	}
4757 
4758 	/* Warn user if link speed on NPAR enabled partition is not at
4759 	 * least 10 Gbps
4760 	 */
4761 	if (vsi->back->hw.func_caps.npar_enable &&
4762 	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4763 	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4764 		netdev_warn(vsi->netdev,
4765 			    "The partition detected link speed that is less than 10Gbps\n");
4766 
4767 	switch (vsi->back->hw.phy.link_info.link_speed) {
4768 	case I40E_LINK_SPEED_40GB:
4769 		strlcpy(speed, "40 Gbps", SPEED_SIZE);
4770 		break;
4771 	case I40E_LINK_SPEED_20GB:
4772 		strlcpy(speed, "20 Gbps", SPEED_SIZE);
4773 		break;
4774 	case I40E_LINK_SPEED_10GB:
4775 		strlcpy(speed, "10 Gbps", SPEED_SIZE);
4776 		break;
4777 	case I40E_LINK_SPEED_1GB:
4778 		strlcpy(speed, "1000 Mbps", SPEED_SIZE);
4779 		break;
4780 	case I40E_LINK_SPEED_100MB:
4781 		strlcpy(speed, "100 Mbps", SPEED_SIZE);
4782 		break;
4783 	default:
4784 		break;
4785 	}
4786 
4787 	switch (vsi->back->hw.fc.current_mode) {
4788 	case I40E_FC_FULL:
4789 		strlcpy(fc, "RX/TX", FC_SIZE);
4790 		break;
4791 	case I40E_FC_TX_PAUSE:
4792 		strlcpy(fc, "TX", FC_SIZE);
4793 		break;
4794 	case I40E_FC_RX_PAUSE:
4795 		strlcpy(fc, "RX", FC_SIZE);
4796 		break;
4797 	default:
4798 		strlcpy(fc, "None", FC_SIZE);
4799 		break;
4800 	}
4801 
4802 	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4803 		    speed, fc);
4804 }
4805 
4806 /**
4807  * i40e_up_complete - Finish the last steps of bringing up a connection
4808  * @vsi: the VSI being configured
4809  **/
4810 static int i40e_up_complete(struct i40e_vsi *vsi)
4811 {
4812 	struct i40e_pf *pf = vsi->back;
4813 	int err;
4814 
4815 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4816 		i40e_vsi_configure_msix(vsi);
4817 	else
4818 		i40e_configure_msi_and_legacy(vsi);
4819 
4820 	/* start rings */
4821 	err = i40e_vsi_control_rings(vsi, true);
4822 	if (err)
4823 		return err;
4824 
4825 	clear_bit(__I40E_DOWN, &vsi->state);
4826 	i40e_napi_enable_all(vsi);
4827 	i40e_vsi_enable_irq(vsi);
4828 
4829 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4830 	    (vsi->netdev)) {
4831 		i40e_print_link_message(vsi, true);
4832 		netif_tx_start_all_queues(vsi->netdev);
4833 		netif_carrier_on(vsi->netdev);
4834 	} else if (vsi->netdev) {
4835 		i40e_print_link_message(vsi, false);
4836 		/* need to check for qualified module here */
4837 		if ((pf->hw.phy.link_info.link_info &
4838 			I40E_AQ_MEDIA_AVAILABLE) &&
4839 		    (!(pf->hw.phy.link_info.an_info &
4840 			I40E_AQ_QUALIFIED_MODULE)))
4841 			netdev_err(vsi->netdev,
4842 				   "the driver failed to link because an unqualified module was detected.");
4843 	}
4844 
4845 	/* replay FDIR SB filters */
4846 	if (vsi->type == I40E_VSI_FDIR) {
4847 		/* reset fd counters */
4848 		pf->fd_add_err = pf->fd_atr_cnt = 0;
4849 		if (pf->fd_tcp_rule > 0) {
4850 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4851 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
4852 				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4853 			pf->fd_tcp_rule = 0;
4854 		}
4855 		i40e_fdir_filter_restore(vsi);
4856 	}
4857 	i40e_service_event_schedule(pf);
4858 
4859 	return 0;
4860 }
4861 
4862 /**
4863  * i40e_vsi_reinit_locked - Reset the VSI
4864  * @vsi: the VSI being configured
4865  *
4866  * Rebuild the ring structs after some configuration
4867  * has changed, e.g. MTU size.
4868  **/
4869 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4870 {
4871 	struct i40e_pf *pf = vsi->back;
4872 
4873 	WARN_ON(in_interrupt());
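	/* serialize with any other configuration change in progress */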
4874 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4875 		usleep_range(1000, 2000);
4876 	i40e_down(vsi);
4877 
4878 	/* Give a VF some time to respond to the reset.  The
4879 	 * two second wait is based upon the watchdog cycle in
4880 	 * the VF driver.
4881 	 */
4882 	if (vsi->type == I40E_VSI_SRIOV)
4883 		msleep(2000);
4884 	i40e_up(vsi);
4885 	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4886 }
4887 
4888 /**
4889  * i40e_up - Bring the connection back up after being down
4890  * @vsi: the VSI being configured
4891  **/
4892 int i40e_up(struct i40e_vsi *vsi)
4893 {
4894 	int err;
4895 
4896 	err = i40e_vsi_configure(vsi);
4897 	if (!err)
4898 		err = i40e_up_complete(vsi);
4899 
4900 	return err;
4901 }
4902 
4903 /**
4904  * i40e_down - Shutdown the connection processing
4905  * @vsi: the VSI being stopped
4906  **/
4907 void i40e_down(struct i40e_vsi *vsi)
4908 {
4909 	int i;
4910 
4911 	/* It is assumed that the caller of this function
4912 	 * sets the vsi->state __I40E_DOWN bit.
4913 	 */
4914 	if (vsi->netdev) {
4915 		netif_carrier_off(vsi->netdev);
4916 		netif_tx_disable(vsi->netdev);
4917 	}
4918 	i40e_vsi_disable_irq(vsi);
4919 	i40e_vsi_control_rings(vsi, false);
4920 	i40e_napi_disable_all(vsi);
4921 
4922 	for (i = 0; i < vsi->num_queue_pairs; i++) {
4923 		i40e_clean_tx_ring(vsi->tx_rings[i]);
4924 		i40e_clean_rx_ring(vsi->rx_rings[i]);
4925 	}
4926 }
4927 
4928 /**
4929  * i40e_setup_tc - configure multiple traffic classes
4930  * @netdev: net device to configure
4931  * @tc: number of traffic classes to enable
4932  **/
4933 #ifdef I40E_FCOE
4934 int i40e_setup_tc(struct net_device *netdev, u8 tc)
4935 #else
4936 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4937 #endif
4938 {
4939 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4940 	struct i40e_vsi *vsi = np->vsi;
4941 	struct i40e_pf *pf = vsi->back;
4942 	u8 enabled_tc = 0;
4943 	int ret = -EINVAL;
4944 	int i;
4945 
4946 	/* Check if DCB enabled to continue */
4947 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4948 		netdev_info(netdev, "DCB is not enabled for adapter\n");
4949 		goto exit;
4950 	}
4951 
4952 	/* Check if MFP enabled */
4953 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4954 		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4955 		goto exit;
4956 	}
4957 
4958 	/* Check whether tc count is within enabled limit */
4959 	if (tc > i40e_pf_get_num_tc(pf)) {
4960 		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4961 		goto exit;
4962 	}
4963 
4964 	/* Generate TC map for number of tc requested */
4965 	for (i = 0; i < tc; i++)
4966 		enabled_tc |= BIT_ULL(i);
4967 
4968 	/* Requesting same TC configuration as already enabled */
4969 	if (enabled_tc == vsi->tc_config.enabled_tc)
4970 		return 0;
4971 
4972 	/* Quiesce VSI queues */
4973 	i40e_quiesce_vsi(vsi);
4974 
4975 	/* Configure VSI for enabled TCs */
4976 	ret = i40e_vsi_config_tc(vsi, enabled_tc);
4977 	if (ret) {
4978 		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4979 			    vsi->seid);
4980 		goto exit;
4981 	}
4982 
4983 	/* Unquiesce VSI */
4984 	i40e_unquiesce_vsi(vsi);
4985 
4986 exit:
4987 	return ret;
4988 }
4989 
4990 /**
4991  * i40e_open - Called when a network interface is made active
4992  * @netdev: network interface device structure
4993  *
4994  * The open entry point is called when a network interface is made
4995  * active by the system (IFF_UP).  At this point all resources needed
4996  * for transmit and receive operations are allocated, the interrupt
4997  * handler is registered with the OS, the netdev watchdog subtask is
4998  * enabled, and the stack is notified that the interface is ready.
4999  *
5000  * Returns 0 on success, negative value on failure
5001  **/
5002 int i40e_open(struct net_device *netdev)
5003 {
5004 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5005 	struct i40e_vsi *vsi = np->vsi;
5006 	struct i40e_pf *pf = vsi->back;
5007 	int err;
5008 
5009 	/* disallow open during test or if eeprom is broken */
5010 	if (test_bit(__I40E_TESTING, &pf->state) ||
5011 	    test_bit(__I40E_BAD_EEPROM, &pf->state))
5012 		return -EBUSY;
5013 
5014 	netif_carrier_off(netdev);
5015 
5016 	err = i40e_vsi_open(vsi);
5017 	if (err)
5018 		return err;
5019 
5020 	/* configure global TSO hardware offload settings */
5021 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5022 						       TCP_FLAG_FIN) >> 16);
5023 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5024 						       TCP_FLAG_FIN |
5025 						       TCP_FLAG_CWR) >> 16);
5026 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5027 
5028 #ifdef CONFIG_I40E_VXLAN
5029 	vxlan_get_rx_port(netdev);
5030 #endif
5031 
5032 	return 0;
5033 }
5034 
5035 /**
5036  * i40e_vsi_open - Set up a VSI and bring it up
5037  * @vsi: the VSI to open
5038  *
5039  * Finish initialization of the VSI.
5040  *
5041  * Returns 0 on success, negative value on failure
5042  **/
5043 int i40e_vsi_open(struct i40e_vsi *vsi)
5044 {
5045 	struct i40e_pf *pf = vsi->back;
5046 	char int_name[I40E_INT_NAME_STR_LEN];
5047 	int err;
5048 
5049 	/* allocate descriptors */
5050 	err = i40e_vsi_setup_tx_resources(vsi);
5051 	if (err)
5052 		goto err_setup_tx;
5053 	err = i40e_vsi_setup_rx_resources(vsi);
5054 	if (err)
5055 		goto err_setup_rx;
5056 
5057 	err = i40e_vsi_configure(vsi);
5058 	if (err)
5059 		goto err_setup_rx;
5060 
5061 	if (vsi->netdev) {
5062 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5063 			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5064 		err = i40e_vsi_request_irq(vsi, int_name);
5065 		if (err)
5066 			goto err_setup_rx;
5067 
5068 		/* Notify the stack of the actual queue counts. */
5069 		err = netif_set_real_num_tx_queues(vsi->netdev,
5070 						   vsi->num_queue_pairs);
5071 		if (err)
5072 			goto err_set_queues;
5073 
5074 		err = netif_set_real_num_rx_queues(vsi->netdev,
5075 						   vsi->num_queue_pairs);
5076 		if (err)
5077 			goto err_set_queues;
5078 
5079 	} else if (vsi->type == I40E_VSI_FDIR) {
5080 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5081 			 dev_driver_string(&pf->pdev->dev),
5082 			 dev_name(&pf->pdev->dev));
5083 		err = i40e_vsi_request_irq(vsi, int_name);
5084 
5085 	} else {
5086 		err = -EINVAL;
5087 		goto err_setup_rx;
5088 	}
5089 
5090 	err = i40e_up_complete(vsi);
5091 	if (err)
5092 		goto err_up_complete;
5093 
5094 	return 0;
5095 
5096 err_up_complete:
5097 	i40e_down(vsi);
5098 err_set_queues:
5099 	i40e_vsi_free_irq(vsi);
5100 err_setup_rx:
5101 	i40e_vsi_free_rx_resources(vsi);
5102 err_setup_tx:
5103 	i40e_vsi_free_tx_resources(vsi);
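	/* a failed open on the main LAN VSI is recovered with a full PF reset */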
5104 	if (vsi == pf->vsi[pf->lan_vsi])
5105 		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5106 
5107 	return err;
5108 }
5109 
5110 /**
5111  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5112  * @pf: Pointer to PF
5113  *
5114  * This function destroys the hlist where all the Flow Director
5115  * filters were saved.
5116  **/
5117 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5118 {
5119 	struct i40e_fdir_filter *filter;
5120 	struct hlist_node *node2;
5121 
5122 	hlist_for_each_entry_safe(filter, node2,
5123 				  &pf->fdir_filter_list, fdir_node) {
5124 		hlist_del(&filter->fdir_node);
5125 		kfree(filter);
5126 	}
5127 	pf->fdir_pf_active_filters = 0;
5128 }
5129 
5130 /**
5131  * i40e_close - Disables a network interface
5132  * @netdev: network interface device structure
5133  *
5134  * The close entry point is called when an interface is de-activated
5135  * by the OS.  The hardware is still under the driver's control, but
5136  * this netdev interface is disabled.
5137  *
5138  * Returns 0, this is not allowed to fail
5139  **/
5140 #ifdef I40E_FCOE
5141 int i40e_close(struct net_device *netdev)
5142 #else
5143 static int i40e_close(struct net_device *netdev)
5144 #endif
5145 {
5146 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5147 	struct i40e_vsi *vsi = np->vsi;
5148 
5149 	i40e_vsi_close(vsi);
5150 
5151 	return 0;
5152 }
5153 
5154 /**
5155  * i40e_do_reset - Start a PF or Core Reset sequence
5156  * @pf: board private structure
5157  * @reset_flags: which reset is requested
5158  *
5159  * The essential difference in resets is that the PF Reset
5160  * doesn't clear the packet buffers, doesn't reset the PE
5161  * firmware, and doesn't bother the other PFs on the chip.
5162  **/
5163 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5164 {
5165 	u32 val;
5166 
5167 	WARN_ON(in_interrupt());
5168 
5169 	if (i40e_check_asq_alive(&pf->hw))
5170 		i40e_vc_notify_reset(pf);
5171 
5172 	/* do the biggest reset indicated */
5173 	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5174 
5175 		/* Request a Global Reset
5176 		 *
5177 		 * This will start the chip's countdown to the actual full
5178 		 * chip reset event, and a warning interrupt to be sent
5179 		 * to all PFs, including the requestor.  Our handler
5180 		 * for the warning interrupt will deal with the shutdown
5181 		 * and recovery of the switch setup.
5182 		 */
5183 		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5184 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5185 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5186 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5187 
5188 	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5189 
5190 		/* Request a Core Reset
5191 		 *
5192 		 * Same as Global Reset, except does *not* include the MAC/PHY
5193 		 */
5194 		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5195 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5196 		val |= I40E_GLGEN_RTRIG_CORER_MASK;
5197 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5198 		i40e_flush(&pf->hw);
5199 
5200 	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5201 
5202 		/* Request a PF Reset
5203 		 *
5204 		 * Resets only the PF-specific registers
5205 		 *
5206 		 * This goes directly to the tear-down and rebuild of
5207 		 * the switch, since we need to do all the recovery as
5208 		 * for the Core Reset.
5209 		 */
5210 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
5211 		i40e_handle_reset_warning(pf);
5212 
5213 	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5214 		int v;
5215 
5216 		/* Find the VSI(s) that requested a re-init */
5217 		dev_info(&pf->pdev->dev,
5218 			 "VSI reinit requested\n");
5219 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5220 			struct i40e_vsi *vsi = pf->vsi[v];
5221 			if (vsi != NULL &&
5222 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5223 				i40e_vsi_reinit_locked(pf->vsi[v]);
5224 				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5225 			}
5226 		}
5227 
5228 		/* no further action needed, so return now */
5229 		return;
5230 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5231 		int v;
5232 
5233 		/* Find the VSI(s) that needs to be brought down */
5234 		dev_info(&pf->pdev->dev, "VSI down requested\n");
5235 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5236 			struct i40e_vsi *vsi = pf->vsi[v];
5237 			if (vsi != NULL &&
5238 			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5239 				set_bit(__I40E_DOWN, &vsi->state);
5240 				i40e_down(vsi);
5241 				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5242 			}
5243 		}
5244 
5245 		/* no further action needed, so return now */
5246 		return;
5247 	} else {
5248 		dev_info(&pf->pdev->dev,
5249 			 "bad reset request 0x%08x\n", reset_flags);
5250 		return;
5251 	}
5252 }
5253 
5254 #ifdef CONFIG_I40E_DCB
5255 /**
5256  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5257  * @pf: board private structure
5258  * @old_cfg: current DCB config
5259  * @new_cfg: new DCB config
5260  **/
5261 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5262 			    struct i40e_dcbx_config *old_cfg,
5263 			    struct i40e_dcbx_config *new_cfg)
5264 {
5265 	bool need_reconfig = false;
5266 
5267 	/* Check if ETS configuration has changed */
5268 	if (memcmp(&new_cfg->etscfg,
5269 		   &old_cfg->etscfg,
5270 		   sizeof(new_cfg->etscfg))) {
5271 		/* If Priority Table has changed reconfig is needed */
5272 		if (memcmp(&new_cfg->etscfg.prioritytable,
5273 			   &old_cfg->etscfg.prioritytable,
5274 			   sizeof(new_cfg->etscfg.prioritytable))) {
5275 			need_reconfig = true;
5276 			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5277 		}
5278 
5279 		if (memcmp(&new_cfg->etscfg.tcbwtable,
5280 			   &old_cfg->etscfg.tcbwtable,
5281 			   sizeof(new_cfg->etscfg.tcbwtable)))
5282 			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5283 
5284 		if (memcmp(&new_cfg->etscfg.tsatable,
5285 			   &old_cfg->etscfg.tsatable,
5286 			   sizeof(new_cfg->etscfg.tsatable)))
5287 			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5288 	}
5289 
5290 	/* Check if PFC configuration has changed */
5291 	if (memcmp(&new_cfg->pfc,
5292 		   &old_cfg->pfc,
5293 		   sizeof(new_cfg->pfc))) {
5294 		need_reconfig = true;
5295 		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5296 	}
5297 
5298 	/* Check if APP Table has changed */
5299 	if (memcmp(&new_cfg->app,
5300 		   &old_cfg->app,
5301 		   sizeof(new_cfg->app))) {
5302 		need_reconfig = true;
5303 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5304 	}
5305 
5306 	dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5307 		need_reconfig);
5308 	return need_reconfig;
5309 }
5310 
5311 /**
5312  * i40e_handle_lldp_event - Handle LLDP Change MIB event
5313  * @pf: board private structure
5314  * @e: event info posted on ARQ
5315  **/
5316 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5317 				  struct i40e_arq_event_info *e)
5318 {
5319 	struct i40e_aqc_lldp_get_mib *mib =
5320 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5321 	struct i40e_hw *hw = &pf->hw;
5322 	struct i40e_dcbx_config tmp_dcbx_cfg;
5323 	bool need_reconfig = false;
5324 	int ret = 0;
5325 	u8 type;
5326 
5327 	/* Not DCB capable or capability disabled */
5328 	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5329 		return ret;
5330 
5331 	/* Ignore if event is not for Nearest Bridge */
5332 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5333 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5334 	dev_dbg(&pf->pdev->dev,
5335 		"%s: LLDP event mib bridge type 0x%x\n", __func__, type);
5336 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5337 		return ret;
5338 
5339 	/* Check MIB Type and return if event for Remote MIB update */
5340 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5341 	dev_dbg(&pf->pdev->dev,
5342 		"%s: LLDP event mib type %s\n", __func__,
5343 		type ? "remote" : "local");
5344 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5345 		/* Update the remote cached instance and return */
5346 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5347 				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5348 				&hw->remote_dcbx_config);
5349 		goto exit;
5350 	}
5351 
5352 	/* Store the old configuration */
5353 	tmp_dcbx_cfg = hw->local_dcbx_config;
5354 
5355 	/* Reset the old DCBx configuration data */
5356 	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5357 	/* Get updated DCBX data from firmware */
5358 	ret = i40e_get_dcb_config(&pf->hw);
5359 	if (ret) {
5360 		dev_info(&pf->pdev->dev,
5361 			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5362 			 i40e_stat_str(&pf->hw, ret),
5363 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5364 		goto exit;
5365 	}
5366 
5367 	/* No change detected in DCBX configs */
5368 	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5369 		    sizeof(tmp_dcbx_cfg))) {
5370 		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5371 		goto exit;
5372 	}
5373 
5374 	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5375 					       &hw->local_dcbx_config);
5376 
5377 	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5378 
5379 	if (!need_reconfig)
5380 		goto exit;
5381 
5382 	/* Enable DCB tagging only when more than one TC */
5383 	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5384 		pf->flags |= I40E_FLAG_DCB_ENABLED;
5385 	else
5386 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5387 
5388 	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5389 	/* Reconfiguration is needed; quiesce all VSIs */
5390 	i40e_pf_quiesce_all_vsi(pf);
5391 
5392 	/* Apply the configuration changes to the VEB/VSI */
5393 	i40e_dcb_reconfigure(pf);
5394 
5395 	ret = i40e_resume_port_tx(pf);
5396 
5397 	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5398 	/* In case of error no point in resuming VSIs */
5399 	if (ret)
5400 		goto exit;
5401 
5402 	/* Wait for the PF's Tx queues to be disabled */
5403 	ret = i40e_pf_wait_txq_disabled(pf);
5404 	if (ret) {
5405 		/* Schedule PF reset to recover */
5406 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5407 		i40e_service_event_schedule(pf);
5408 	} else {
5409 		i40e_pf_unquiesce_all_vsi(pf);
5410 	}
5411 
5412 exit:
5413 	return ret;
5414 }
5415 #endif /* CONFIG_I40E_DCB */
5416 
5417 /**
5418  * i40e_do_reset_safe - Protected reset path for userland calls.
5419  * @pf: board private structure
5420  * @reset_flags: which reset is requested
5421  *
5422  **/
5423 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5424 {
5425 	rtnl_lock();
5426 	i40e_do_reset(pf, reset_flags);
5427 	rtnl_unlock();
5428 }
5429 
5430 /**
5431  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5432  * @pf: board private structure
5433  * @e: event info posted on ARQ
5434  *
5435  * Handler for LAN Queue Overflow Event generated by the firmware for PF
5436  * and VF queues
5437  **/
5438 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5439 					   struct i40e_arq_event_info *e)
5440 {
5441 	struct i40e_aqc_lan_overflow *data =
5442 		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5443 	u32 queue = le32_to_cpu(data->prtdcb_rupto);
5444 	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5445 	struct i40e_hw *hw = &pf->hw;
5446 	struct i40e_vf *vf;
5447 	u16 vf_id;
5448 
5449 	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5450 		queue, qtx_ctl);
5451 
5452 	/* Queue belongs to VF, find the VF and issue VF reset */
5453 	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5454 	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5455 		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5456 			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
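		/* the register reports an absolute VF ID; convert it to this
		 * PF's 0-based VF index before looking up the VF
		 */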
5457 		vf_id -= hw->func_caps.vf_base_id;
5458 		vf = &pf->vf[vf_id];
5459 		i40e_vc_notify_vf_reset(vf);
5460 		/* Allow VF to process pending reset notification */
5461 		msleep(20);
5462 		i40e_reset_vf(vf, false);
5463 	}
5464 }
5465 
5466 /**
5467  * i40e_service_event_complete - Finish up the service event
5468  * @pf: board private structure
5469  **/
5470 static void i40e_service_event_complete(struct i40e_pf *pf)
5471 {
5472 	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5473 
5474 	/* flush memory to make sure state is correct before next watchdog */
5475 	smp_mb__before_atomic();
5476 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5477 }
5478 
5479 /**
5480  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5481  * @pf: board private structure
5482  **/
5483 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5484 {
5485 	u32 val, fcnt_prog;
5486 
5487 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5488 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5489 	return fcnt_prog;
5490 }
5491 
5492 /**
5493  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5494  * @pf: board private structure
5495  **/
5496 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5497 {
5498 	u32 val, fcnt_prog;
5499 
5500 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
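	/* total = guaranteed filters plus best effort filters */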
5501 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5502 		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5503 		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5504 	return fcnt_prog;
5505 }
5506 
5507 /**
5508  * i40e_get_global_fd_count - Get total FD filters programmed on device
5509  * @pf: board private structure
5510  **/
5511 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5512 {
5513 	u32 val, fcnt_prog;
5514 
5515 	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5516 	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5517 		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5518 		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5519 	return fcnt_prog;
5520 }
5521 
5522 /**
5523  * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
5524  * @pf: board private structure
5525  **/
5526 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5527 {
5528 	u32 fcnt_prog, fcnt_avail;
5529 
5530 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5531 		return;
5532 
5533 	/* Check if FD SB or ATR was auto-disabled and if there is enough room
5534 	 * to re-enable them
5535 	 */
5536 	fcnt_prog = i40e_get_global_fd_count(pf);
5537 	fcnt_avail = pf->fdir_pf_filter_count;
5538 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5539 	    (pf->fd_add_err == 0) ||
5540 	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5541 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5542 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5543 			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5544 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5545 				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5546 		}
5547 	}
5548 	/* Wait for some more space to be available to turn on ATR */
5549 	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5550 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5551 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5552 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5553 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5554 				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5555 		}
5556 	}
5557 }
5558 
5559 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5560 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
5561 /**
5562  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5563  * @pf: board private structure
5564  **/
5565 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5566 {
5567 	unsigned long min_flush_time;
5568 	int flush_wait_retry = 50;
5569 	bool disable_atr = false;
5570 	int fd_room;
5571 	int reg;
5572 
5573 	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5574 		return;
5575 
5576 	if (time_after(jiffies, pf->fd_flush_timestamp +
5577 				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
5578 		/* If the flush is happening too quickly and we have mostly
5579 		 * SB rules, we should not re-enable ATR for some time.
5580 		 */
5581 		min_flush_time = pf->fd_flush_timestamp
5582 				+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5583 		fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5584 
5585 		if (!(time_after(jiffies, min_flush_time)) &&
5586 		    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5587 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5588 				dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5589 			disable_atr = true;
5590 		}
5591 
5592 		pf->fd_flush_timestamp = jiffies;
5593 		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5594 		/* flush all filters */
5595 		wr32(&pf->hw, I40E_PFQF_CTL_1,
5596 		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5597 		i40e_flush(&pf->hw);
5598 		pf->fd_flush_cnt++;
5599 		pf->fd_add_err = 0;
5600 		do {
5601 			/* Check FD flush status every 5-6msec */
5602 			usleep_range(5000, 6000);
5603 			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5604 			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5605 				break;
5606 		} while (flush_wait_retry--);
5607 		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5608 			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5609 		} else {
5610 			/* replay sideband filters */
5611 			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5612 			if (!disable_atr)
5613 				pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5614 			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5615 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5616 				dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5617 		}
5618 	}
5619 }
5620 
5621 /**
5622  * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
5623  * @pf: board private structure
5624  **/
5625 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5626 {
5627 	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5628 }
5629 
5630 /* If filters are being applied very quickly, we can see up to 256 filter
5631  * programming descriptors in transit before the first filter miss error
5632  * shows up on Rx queue 0. Accumulating enough error messages before
5633  * reacting makes sure we don't trigger a flush too often.
5634  */
5635 #define I40E_MAX_FD_PROGRAM_ERROR 256
5636 
5637 /**
5638  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5639  * @pf: board private structure
5640  **/
5641 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5642 {
5643 
5644 	/* if interface is down do nothing */
5645 	if (test_bit(__I40E_DOWN, &pf->state))
5646 		return;
5647 
5648 	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5649 		return;
5650 
5651 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5652 		i40e_fdir_flush_and_replay(pf);
5653 
5654 	i40e_fdir_check_and_reenable(pf);
5655 
5656 }
5657 
5658 /**
5659  * i40e_vsi_link_event - notify VSI of a link event
5660  * @vsi: vsi to be notified
5661  * @link_up: link up or down
5662  **/
5663 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5664 {
5665 	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5666 		return;
5667 
5668 	switch (vsi->type) {
5669 	case I40E_VSI_MAIN:
5670 #ifdef I40E_FCOE
5671 	case I40E_VSI_FCOE:
5672 #endif
5673 		if (!vsi->netdev || !vsi->netdev_registered)
5674 			break;
5675 
5676 		if (link_up) {
5677 			netif_carrier_on(vsi->netdev);
5678 			netif_tx_wake_all_queues(vsi->netdev);
5679 		} else {
5680 			netif_carrier_off(vsi->netdev);
5681 			netif_tx_stop_all_queues(vsi->netdev);
5682 		}
5683 		break;
5684 
5685 	case I40E_VSI_SRIOV:
5686 	case I40E_VSI_VMDQ2:
5687 	case I40E_VSI_CTRL:
5688 	case I40E_VSI_MIRROR:
5689 	default:
5690 		/* there is no notification for other VSIs */
5691 		break;
5692 	}
5693 }
5694 
5695 /**
5696  * i40e_veb_link_event - notify elements on the veb of a link event
5697  * @veb: veb to be notified
5698  * @link_up: link up or down
5699  **/
5700 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5701 {
5702 	struct i40e_pf *pf;
5703 	int i;
5704 
5705 	if (!veb || !veb->pf)
5706 		return;
5707 	pf = veb->pf;
5708 
5709 	/* depth first... */
5710 	for (i = 0; i < I40E_MAX_VEB; i++)
5711 		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5712 			i40e_veb_link_event(pf->veb[i], link_up);
5713 
5714 	/* ... now the local VSIs */
5715 	for (i = 0; i < pf->num_alloc_vsi; i++)
5716 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5717 			i40e_vsi_link_event(pf->vsi[i], link_up);
5718 }
5719 
5720 /**
5721  * i40e_link_event - Update netif_carrier status
5722  * @pf: board private structure
5723  **/
5724 static void i40e_link_event(struct i40e_pf *pf)
5725 {
5726 	bool new_link, old_link;
5727 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5728 	u8 new_link_speed, old_link_speed;
5729 
5730 	/* set this to force the get_link_status call to refresh state */
5731 	pf->hw.phy.get_link_info = true;
5732 
5733 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5734 	new_link = i40e_get_link_status(&pf->hw);
5735 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
5736 	new_link_speed = pf->hw.phy.link_info.link_speed;
5737 
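	/* bail if neither the link state nor the speed changed and either
	 * the VSI is down or the netdev carrier already matches
	 */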
5738 	if (new_link == old_link &&
5739 	    new_link_speed == old_link_speed &&
5740 	    (test_bit(__I40E_DOWN, &vsi->state) ||
5741 	     new_link == netif_carrier_ok(vsi->netdev)))
5742 		return;
5743 
5744 	if (!test_bit(__I40E_DOWN, &vsi->state))
5745 		i40e_print_link_message(vsi, new_link);
5746 
5747 	/* Notify the base of the switch tree connected to
5748 	 * the link.  Floating VEBs are not notified.
5749 	 */
5750 	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
5751 		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
5752 	else
5753 		i40e_vsi_link_event(vsi, new_link);
5754 
5755 	if (pf->vf)
5756 		i40e_vc_notify_link_state(pf);
5757 
5758 	if (pf->flags & I40E_FLAG_PTP)
5759 		i40e_ptp_set_increment(pf);
5760 }
5761 
5762 /**
5763  * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
5764  * @pf: board private structure
5765  *
5766  * Set the per-queue flags to request a check for stuck queues in the irq
5767  * clean functions, then force interrupts to be sure the irq clean is called.
5768  **/
5769 static void i40e_check_hang_subtask(struct i40e_pf *pf)
5770 {
5771 	int i, v;
5772 
5773 	/* If we're down or resetting, just bail */
5774 	if (test_bit(__I40E_DOWN, &pf->state) ||
5775 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
5776 		return;
5777 
5778 	/* for each VSI/netdev
5779 	 *     for each Tx queue
5780 	 *         set the check flag
5781 	 *     for each q_vector
5782 	 *         force an interrupt
5783 	 */
5784 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5785 		struct i40e_vsi *vsi = pf->vsi[v];
5786 		int armed = 0;
5787 
5788 		if (!pf->vsi[v] ||
5789 		    test_bit(__I40E_DOWN, &vsi->state) ||
5790 		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
5791 			continue;
5792 
5793 		for (i = 0; i < vsi->num_queue_pairs; i++) {
5794 			set_check_for_tx_hang(vsi->tx_rings[i]);
5795 			if (test_bit(__I40E_HANG_CHECK_ARMED,
5796 				     &vsi->tx_rings[i]->state))
5797 				armed++;
5798 		}
5799 
5800 		if (armed) {
5801 			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
5802 				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
5803 				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
5804 				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
5805 				      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
5806 				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
5807 				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
5808 			} else {
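				/* with MSI-X, fire a software interrupt on
				 * each of the VSI's queue vectors
				 */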
5809 				u16 vec = vsi->base_vector - 1;
5810 				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
5811 				      I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
5812 				      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
5813 				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
5814 				      I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
5815 				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
5816 					wr32(&vsi->back->hw,
5817 					     I40E_PFINT_DYN_CTLN(vec), val);
5818 			}
5819 			i40e_flush(&vsi->back->hw);
5820 		}
5821 	}
5822 }
5823 
5824 /**
5825  * i40e_watchdog_subtask - periodic checks not using event driven response
5826  * @pf: board private structure
5827  **/
5828 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5829 {
5830 	int i;
5831 
5832 	/* if interface is down do nothing */
5833 	if (test_bit(__I40E_DOWN, &pf->state) ||
5834 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
5835 		return;
5836 
5837 	/* make sure we don't do these things too often */
5838 	if (time_before(jiffies, (pf->service_timer_previous +
5839 				  pf->service_timer_period)))
5840 		return;
5841 	pf->service_timer_previous = jiffies;
5842 
5843 	i40e_check_hang_subtask(pf);
5844 	i40e_link_event(pf);
5845 
5846 	/* Update the stats for active netdevs so the network stack
5847 	 * can look at updated numbers whenever it cares to
5848 	 */
5849 	for (i = 0; i < pf->num_alloc_vsi; i++)
5850 		if (pf->vsi[i] && pf->vsi[i]->netdev)
5851 			i40e_update_stats(pf->vsi[i]);
5852 
5853 	/* Update the stats for the active switching components */
5854 	for (i = 0; i < I40E_MAX_VEB; i++)
5855 		if (pf->veb[i])
5856 			i40e_update_veb_stats(pf->veb[i]);
5857 
5858 	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5859 }
5860 
5861 /**
5862  * i40e_reset_subtask - Set up for resetting the device and driver
5863  * @pf: board private structure
5864  **/
5865 static void i40e_reset_subtask(struct i40e_pf *pf)
5866 {
5867 	u32 reset_flags = 0;
5868 
5869 	rtnl_lock();
5870 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5871 		reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
5872 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5873 	}
5874 	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5875 		reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
5876 		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5877 	}
5878 	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5879 		reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
5880 		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5881 	}
5882 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5883 		reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
5884 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5885 	}
5886 	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5887 		reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
5888 		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5889 	}
5890 
5891 	/* If a recovery is already waiting, it takes precedence
5892 	 * over starting a new reset sequence.
5893 	 */
5894 	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5895 		i40e_handle_reset_warning(pf);
5896 		goto unlock;
5897 	}
5898 
5899 	/* If we're already down or resetting, just bail */
5900 	if (reset_flags &&
5901 	    !test_bit(__I40E_DOWN, &pf->state) &&
5902 	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5903 		i40e_do_reset(pf, reset_flags);
5904 
5905 unlock:
5906 	rtnl_unlock();
5907 }
5908 
5909 /**
5910  * i40e_handle_link_event - Handle link event
5911  * @pf: board private structure
5912  * @e: event info posted on ARQ
5913  **/
5914 static void i40e_handle_link_event(struct i40e_pf *pf,
5915 				   struct i40e_arq_event_info *e)
5916 {
5917 	struct i40e_hw *hw = &pf->hw;
5918 	struct i40e_aqc_get_link_status *status =
5919 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5920 
5921 	/* save off old link status information */
5922 	hw->phy.link_info_old = hw->phy.link_info;
5923 
5924 	/* Do a new status request to re-enable LSE reporting
5925 	 * and load new status information into the hw struct.
5926 	 * This completely ignores any state information
5927 	 * in the ARQ event info, instead choosing to always
5928 	 * issue the AQ update link status command.
5929 	 */
5930 	i40e_link_event(pf);
5931 
5932 	/* check for unqualified module, if link is down */
5933 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5934 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5935 	    (!(status->link_info & I40E_AQ_LINK_UP)))
5936 		dev_err(&pf->pdev->dev,
5937 			"The driver failed to link because an unqualified module was detected.\n");
5938 }
5939 
5940 /**
5941  * i40e_clean_adminq_subtask - Clean the AdminQ rings
5942  * @pf: board private structure
5943  **/
5944 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5945 {
5946 	struct i40e_arq_event_info event;
5947 	struct i40e_hw *hw = &pf->hw;
5948 	u16 pending, i = 0;
5949 	i40e_status ret;
5950 	u16 opcode;
5951 	u32 oldval;
5952 	u32 val;
5953 
5954 	/* Do not run clean AQ when PF reset fails */
5955 	if (test_bit(__I40E_RESET_FAILED, &pf->state))
5956 		return;
5957 
5958 	/* check for error indications */
5959 	val = rd32(&pf->hw, pf->hw.aq.arq.len);
5960 	oldval = val;
5961 	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
5962 		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
5963 		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
5964 	}
5965 	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
5966 		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
5967 		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
5968 	}
5969 	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
5970 		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
5971 		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
5972 	}
5973 	if (oldval != val)
5974 		wr32(&pf->hw, pf->hw.aq.arq.len, val);
5975 
5976 	val = rd32(&pf->hw, pf->hw.aq.asq.len);
5977 	oldval = val;
5978 	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
5979 		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
5980 		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
5981 	}
5982 	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
5983 		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
5984 		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
5985 	}
5986 	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
5987 		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
5988 		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
5989 	}
5990 	if (oldval != val)
5991 		wr32(&pf->hw, pf->hw.aq.asq.len, val);
5992 
5993 	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
5994 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
5995 	if (!event.msg_buf)
5996 		return;
5997 
5998 	do {
5999 		ret = i40e_clean_arq_element(hw, &event, &pending);
6000 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6001 			break;
6002 		else if (ret) {
6003 			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6004 			break;
6005 		}
6006 
6007 		opcode = le16_to_cpu(event.desc.opcode);
6008 		switch (opcode) {
6009 
6010 		case i40e_aqc_opc_get_link_status:
6011 			i40e_handle_link_event(pf, &event);
6012 			break;
6013 		case i40e_aqc_opc_send_msg_to_pf:
6014 			ret = i40e_vc_process_vf_msg(pf,
6015 					le16_to_cpu(event.desc.retval),
6016 					le32_to_cpu(event.desc.cookie_high),
6017 					le32_to_cpu(event.desc.cookie_low),
6018 					event.msg_buf,
6019 					event.msg_len);
6020 			break;
6021 		case i40e_aqc_opc_lldp_update_mib:
6022 			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6023 #ifdef CONFIG_I40E_DCB
6024 			rtnl_lock();
6025 			ret = i40e_handle_lldp_event(pf, &event);
6026 			rtnl_unlock();
6027 #endif /* CONFIG_I40E_DCB */
6028 			break;
6029 		case i40e_aqc_opc_event_lan_overflow:
6030 			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6031 			i40e_handle_lan_overflow_event(pf, &event);
6032 			break;
6033 		case i40e_aqc_opc_send_msg_to_peer:
6034 			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6035 			break;
6036 		case i40e_aqc_opc_nvm_erase:
6037 		case i40e_aqc_opc_nvm_update:
6038 			i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
6039 			break;
6040 		default:
6041 			dev_info(&pf->pdev->dev,
6042 				 "ARQ Error: Unknown event 0x%04x received\n",
6043 				 opcode);
6044 			break;
6045 		}
6046 	} while (pending && (i++ < pf->adminq_work_limit));
6047 
6048 	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6049 	/* re-enable Admin queue interrupt cause */
6050 	val = rd32(hw, I40E_PFINT_ICR0_ENA);
6051 	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6052 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
6053 	i40e_flush(hw);
6054 
6055 	kfree(event.msg_buf);
6056 }
6057 
6058 /**
6059  * i40e_verify_eeprom - make sure eeprom is good to use
6060  * @pf: board private structure
6061  **/
6062 static void i40e_verify_eeprom(struct i40e_pf *pf)
6063 {
6064 	int err;
6065 
6066 	err = i40e_diag_eeprom_test(&pf->hw);
6067 	if (err) {
6068 		/* retry in case of garbage read */
6069 		err = i40e_diag_eeprom_test(&pf->hw);
6070 		if (err) {
6071 			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6072 				 err);
6073 			set_bit(__I40E_BAD_EEPROM, &pf->state);
6074 		}
6075 	}
6076 
6077 	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6078 		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6079 		clear_bit(__I40E_BAD_EEPROM, &pf->state);
6080 	}
6081 }
6082 
6083 /**
6084  * i40e_enable_pf_switch_lb
6085  * @pf: pointer to the PF structure
6086  *
6087  * enable switch loop back or die - no point in a return value
6088  **/
6089 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6090 {
6091 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6092 	struct i40e_vsi_context ctxt;
6093 	int ret;
6094 
6095 	ctxt.seid = pf->main_vsi_seid;
6096 	ctxt.pf_num = pf->hw.pf_id;
6097 	ctxt.vf_num = 0;
6098 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6099 	if (ret) {
6100 		dev_info(&pf->pdev->dev,
6101 			 "couldn't get PF vsi config, err %s aq_err %s\n",
6102 			 i40e_stat_str(&pf->hw, ret),
6103 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6104 		return;
6105 	}
6106 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6107 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6108 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6109 
6110 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6111 	if (ret) {
6112 		dev_info(&pf->pdev->dev,
6113 			 "update vsi switch failed, err %s aq_err %s\n",
6114 			 i40e_stat_str(&pf->hw, ret),
6115 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6116 	}
6117 }
6118 
6119 /**
6120  * i40e_disable_pf_switch_lb
6121  * @pf: pointer to the PF structure
6122  *
6123  * disable switch loop back or die - no point in a return value
6124  **/
6125 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6126 {
6127 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6128 	struct i40e_vsi_context ctxt;
6129 	int ret;
6130 
6131 	ctxt.seid = pf->main_vsi_seid;
6132 	ctxt.pf_num = pf->hw.pf_id;
6133 	ctxt.vf_num = 0;
6134 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6135 	if (ret) {
6136 		dev_info(&pf->pdev->dev,
6137 			 "couldn't get PF vsi config, err %s aq_err %s\n",
6138 			 i40e_stat_str(&pf->hw, ret),
6139 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6140 		return;
6141 	}
6142 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6143 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6144 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6145 
6146 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6147 	if (ret) {
6148 		dev_info(&pf->pdev->dev,
6149 			 "update vsi switch failed, err %s aq_err %s\n",
6150 			 i40e_stat_str(&pf->hw, ret),
6151 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6152 	}
6153 }
6154 
6155 /**
6156  * i40e_config_bridge_mode - Configure the HW bridge mode
6157  * @veb: pointer to the bridge instance
6158  *
6159  * Configure the loopback mode for the LAN VSI that is a downlink to the
6160  * specified HW bridge instance. It is expected this function is called
6161  * when a new HW bridge is instantiated.
6162  **/
6163 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6164 {
6165 	struct i40e_pf *pf = veb->pf;
6166 
6167 	dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6168 		 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6169 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6170 		i40e_disable_pf_switch_lb(pf);
6171 	else
6172 		i40e_enable_pf_switch_lb(pf);
6173 }
6174 
6175 /**
6176  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6177  * @veb: pointer to the VEB instance
6178  *
6179  * This is a recursive function that first builds the attached VSIs then
6180  * recurses to build the next layer of VEBs.  We track the connections
6181  * through our own index numbers because the SEIDs from the HW could
6182  * change across the reset.
6183  **/
6184 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6185 {
6186 	struct i40e_vsi *ctl_vsi = NULL;
6187 	struct i40e_pf *pf = veb->pf;
6188 	int v, veb_idx;
6189 	int ret;
6190 
6191 	/* build VSI that owns this VEB, temporarily attached to base VEB */
6192 	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6193 		if (pf->vsi[v] &&
6194 		    pf->vsi[v]->veb_idx == veb->idx &&
6195 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6196 			ctl_vsi = pf->vsi[v];
6197 			break;
6198 		}
6199 	}
6200 	if (!ctl_vsi) {
6201 		dev_info(&pf->pdev->dev,
6202 			 "missing owner VSI for veb_idx %d\n", veb->idx);
6203 		ret = -ENOENT;
6204 		goto end_reconstitute;
6205 	}
6206 	if (ctl_vsi != pf->vsi[pf->lan_vsi])
6207 		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6208 	ret = i40e_add_vsi(ctl_vsi);
6209 	if (ret) {
6210 		dev_info(&pf->pdev->dev,
6211 			 "rebuild of veb_idx %d owner VSI failed: %d\n",
6212 			 veb->idx, ret);
6213 		goto end_reconstitute;
6214 	}
6215 	i40e_vsi_reset_stats(ctl_vsi);
6216 
6217 	/* create the VEB in the switch and move the VSI onto the VEB */
6218 	ret = i40e_add_veb(veb, ctl_vsi);
6219 	if (ret)
6220 		goto end_reconstitute;
6221 
6222 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6223 		veb->bridge_mode = BRIDGE_MODE_VEB;
6224 	else
6225 		veb->bridge_mode = BRIDGE_MODE_VEPA;
6226 	i40e_config_bridge_mode(veb);
6227 
6228 	/* create the remaining VSIs attached to this VEB */
6229 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6230 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6231 			continue;
6232 
6233 		if (pf->vsi[v]->veb_idx == veb->idx) {
6234 			struct i40e_vsi *vsi = pf->vsi[v];
6235 			vsi->uplink_seid = veb->seid;
6236 			ret = i40e_add_vsi(vsi);
6237 			if (ret) {
6238 				dev_info(&pf->pdev->dev,
6239 					 "rebuild of vsi_idx %d failed: %d\n",
6240 					 v, ret);
6241 				goto end_reconstitute;
6242 			}
6243 			i40e_vsi_reset_stats(vsi);
6244 		}
6245 	}
6246 
6247 	/* create any VEBs attached to this VEB - RECURSION */
6248 	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6249 		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6250 			pf->veb[veb_idx]->uplink_seid = veb->seid;
6251 			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6252 			if (ret)
6253 				break;
6254 		}
6255 	}
6256 
6257 end_reconstitute:
6258 	return ret;
6259 }
6260 
6261 /**
6262  * i40e_get_capabilities - get info about the HW
6263  * @pf: the PF struct
6264  **/
6265 static int i40e_get_capabilities(struct i40e_pf *pf)
6266 {
6267 	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6268 	u16 data_size;
6269 	int buf_len;
6270 	int err;
6271 
6272 	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
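	/* start with room for 40 capability records; if the firmware needs
	 * more, it reports the required size and we retry with that
	 */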
6273 	do {
6274 		cap_buf = kzalloc(buf_len, GFP_KERNEL);
6275 		if (!cap_buf)
6276 			return -ENOMEM;
6277 
6278 		/* this loads the data into the hw struct for us */
6279 		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6280 					    &data_size,
6281 					    i40e_aqc_opc_list_func_capabilities,
6282 					    NULL);
6283 		/* data loaded, buffer no longer needed */
6284 		kfree(cap_buf);
6285 
6286 		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6287 			/* retry with a larger buffer */
6288 			buf_len = data_size;
6289 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6290 			dev_info(&pf->pdev->dev,
6291 				 "capability discovery failed, err %s aq_err %s\n",
6292 				 i40e_stat_str(&pf->hw, err),
6293 				 i40e_aq_str(&pf->hw,
6294 					     pf->hw.aq.asq_last_status));
6295 			return -ENODEV;
6296 		}
6297 	} while (err);
6298 
6299 	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
6300 	    (pf->hw.aq.fw_maj_ver < 2)) {
6301 		pf->hw.func_caps.num_msix_vectors++;
6302 		pf->hw.func_caps.num_msix_vectors_vf++;
6303 	}
6304 
6305 	if (pf->hw.debug_mask & I40E_DEBUG_USER)
6306 		dev_info(&pf->pdev->dev,
6307 			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6308 			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6309 			 pf->hw.func_caps.num_msix_vectors,
6310 			 pf->hw.func_caps.num_msix_vectors_vf,
6311 			 pf->hw.func_caps.fd_filters_guaranteed,
6312 			 pf->hw.func_caps.fd_filters_best_effort,
6313 			 pf->hw.func_caps.num_tx_qp,
6314 			 pf->hw.func_caps.num_vsis);
6315 
6316 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6317 		       + pf->hw.func_caps.num_vfs)
6318 	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6319 		dev_info(&pf->pdev->dev,
6320 			 "got num_vsis %d, setting num_vsis to %d\n",
6321 			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6322 		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6323 	}
6324 
6325 	return 0;
6326 }
6327 
6328 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6329 
6330 /**
6331  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6332  * @pf: board private structure
6333  **/
6334 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6335 {
6336 	struct i40e_vsi *vsi;
6337 	int i;
6338 
6339 	/* quick workaround for an NVM issue that leaves a critical register
6340 	 * uninitialized
6341 	 */
6342 	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6343 		static const u32 hkey[] = {
6344 			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6345 			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6346 			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6347 			0x95b3a76d};
6348 
6349 		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6350 			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6351 	}
6352 
6353 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6354 		return;
6355 
6356 	/* find existing VSI and see if it needs configuring */
6357 	vsi = NULL;
6358 	for (i = 0; i < pf->num_alloc_vsi; i++) {
6359 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6360 			vsi = pf->vsi[i];
6361 			break;
6362 		}
6363 	}
6364 
6365 	/* create a new VSI if none exists */
6366 	if (!vsi) {
6367 		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6368 				     pf->vsi[pf->lan_vsi]->seid, 0);
6369 		if (!vsi) {
6370 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6371 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6372 			return;
6373 		}
6374 	}
6375 
6376 	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6377 }
6378 
6379 /**
6380  * i40e_fdir_teardown - release the Flow Director resources
6381  * @pf: board private structure
6382  **/
6383 static void i40e_fdir_teardown(struct i40e_pf *pf)
6384 {
6385 	int i;
6386 
6387 	i40e_fdir_filter_exit(pf);
6388 	for (i = 0; i < pf->num_alloc_vsi; i++) {
6389 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6390 			i40e_vsi_release(pf->vsi[i]);
6391 			break;
6392 		}
6393 	}
6394 }
6395 
6396 /**
6397  * i40e_prep_for_reset - prep for the core to reset
6398  * @pf: board private structure
6399  *
6400  * Close up the VFs and other things in prep for PF Reset.
6401  **/
6402 static void i40e_prep_for_reset(struct i40e_pf *pf)
6403 {
6404 	struct i40e_hw *hw = &pf->hw;
6405 	i40e_status ret = 0;
6406 	u32 v;
6407 
6408 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
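	/* if a reset recovery is already pending, there is nothing to prep */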
6409 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6410 		return;
6411 
6412 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6413 
6414 	/* quiesce the VSIs and their queues that are not already DOWN */
6415 	i40e_pf_quiesce_all_vsi(pf);
6416 
6417 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6418 		if (pf->vsi[v])
6419 			pf->vsi[v]->seid = 0;
6420 	}
6421 
6422 	i40e_shutdown_adminq(&pf->hw);
6423 
6424 	/* call shutdown HMC */
6425 	if (hw->hmc.hmc_obj) {
6426 		ret = i40e_shutdown_lan_hmc(hw);
6427 		if (ret)
6428 			dev_warn(&pf->pdev->dev,
6429 				 "shutdown_lan_hmc failed: %d\n", ret);
6430 	}
6431 }
6432 
6433 /**
6434  * i40e_send_version - update firmware with driver version
6435  * @pf: PF struct
6436  */
6437 static void i40e_send_version(struct i40e_pf *pf)
6438 {
6439 	struct i40e_driver_version dv;
6440 
6441 	dv.major_version = DRV_VERSION_MAJOR;
6442 	dv.minor_version = DRV_VERSION_MINOR;
6443 	dv.build_version = DRV_VERSION_BUILD;
6444 	dv.subbuild_version = 0;
6445 	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6446 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6447 }
6448 
6449 /**
6450  * i40e_reset_and_rebuild - reset and rebuild using a saved config
6451  * @pf: board private structure
6452  * @reinit: if the Main VSI needs to be re-initialized.
6453  **/
6454 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6455 {
6456 	struct i40e_hw *hw = &pf->hw;
6457 	u8 set_fc_aq_fail = 0;
6458 	i40e_status ret;
6459 	u32 v;
6460 
6461 	/* Now we wait for GRST to settle out.
6462 	 * We don't have to delete the VEBs or VSIs from the hw switch
6463 	 * because the reset will make them disappear.
6464 	 */
6465 	ret = i40e_pf_reset(hw);
6466 	if (ret) {
6467 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6468 		set_bit(__I40E_RESET_FAILED, &pf->state);
6469 		goto clear_recovery;
6470 	}
6471 	pf->pfr_count++;
6472 
6473 	if (test_bit(__I40E_DOWN, &pf->state))
6474 		goto clear_recovery;
6475 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6476 
6477 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6478 	ret = i40e_init_adminq(&pf->hw);
6479 	if (ret) {
6480 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6481 			 i40e_stat_str(&pf->hw, ret),
6482 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6483 		goto clear_recovery;
6484 	}
6485 
6486 	/* re-verify the eeprom if we just had an EMP reset */
6487 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6488 		i40e_verify_eeprom(pf);
6489 
6490 	i40e_clear_pxe_mode(hw);
6491 	ret = i40e_get_capabilities(pf);
6492 	if (ret)
6493 		goto end_core_reset;
6494 
6495 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6496 				hw->func_caps.num_rx_qp,
6497 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6498 	if (ret) {
6499 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6500 		goto end_core_reset;
6501 	}
6502 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6503 	if (ret) {
6504 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6505 		goto end_core_reset;
6506 	}
6507 
6508 #ifdef CONFIG_I40E_DCB
6509 	ret = i40e_init_pf_dcb(pf);
6510 	if (ret) {
6511 		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6512 		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6513 		/* Continue without DCB enabled */
6514 	}
6515 #endif /* CONFIG_I40E_DCB */
6516 #ifdef I40E_FCOE
6517 	ret = i40e_init_pf_fcoe(pf);
6518 	if (ret)
6519 		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
6520 
6521 #endif
6522 	/* do basic switch setup */
6523 	ret = i40e_setup_pf_switch(pf, reinit);
6524 	if (ret)
6525 		goto end_core_reset;
6526 
6527 	/* driver is only interested in link up/down and module qualification
6528 	 * reports from firmware
6529 	 */
6530 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
6531 				       I40E_AQ_EVENT_LINK_UPDOWN |
6532 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6533 	if (ret)
6534 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6535 			 i40e_stat_str(&pf->hw, ret),
6536 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6537 
6538 	/* make sure our flow control settings are restored */
6539 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6540 	if (ret)
6541 		dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
6542 			 i40e_stat_str(&pf->hw, ret),
6543 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6544 
6545 	/* Rebuild the VSIs and VEBs that existed before reset.
6546 	 * They are still in our local switch element arrays, so only
6547 	 * need to rebuild the switch model in the HW.
6548 	 *
6549 	 * If there were VEBs but the reconstitution failed, we'll try
6550 	 * to recover minimal use by getting the basic PF VSI working.
6551 	 */
6552 	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6553 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6554 		/* find the one VEB connected to the MAC, and find orphans */
6555 		for (v = 0; v < I40E_MAX_VEB; v++) {
6556 			if (!pf->veb[v])
6557 				continue;
6558 
6559 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6560 			    pf->veb[v]->uplink_seid == 0) {
6561 				ret = i40e_reconstitute_veb(pf->veb[v]);
6562 
6563 				if (!ret)
6564 					continue;
6565 
6566 				/* If Main VEB failed, we're in deep doodoo,
6567 				 * so give up rebuilding the switch and set up
6568 				 * for minimal rebuild of PF VSI.
6569 				 * If orphan failed, we'll report the error
6570 				 * but try to keep going.
6571 				 */
6572 				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6573 					dev_info(&pf->pdev->dev,
6574 						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6575 						 ret);
6576 					pf->vsi[pf->lan_vsi]->uplink_seid
6577 								= pf->mac_seid;
6578 					break;
6579 				} else if (pf->veb[v]->uplink_seid == 0) {
6580 					dev_info(&pf->pdev->dev,
6581 						 "rebuild of orphan VEB failed: %d\n",
6582 						 ret);
6583 				}
6584 			}
6585 		}
6586 	}
6587 
6588 	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6589 		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6590 		/* no VEB, so rebuild only the Main VSI */
6591 		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6592 		if (ret) {
6593 			dev_info(&pf->pdev->dev,
6594 				 "rebuild of Main VSI failed: %d\n", ret);
6595 			goto end_core_reset;
6596 		}
6597 	}
6598 
6599 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
6600 	    (pf->hw.aq.fw_maj_ver < 4)) {
6601 		msleep(75);
6602 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6603 		if (ret)
6604 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6605 				 i40e_stat_str(&pf->hw, ret),
6606 				 i40e_aq_str(&pf->hw,
6607 					     pf->hw.aq.asq_last_status));
6608 	}
6609 	/* reinit the misc interrupt */
6610 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6611 		ret = i40e_setup_misc_vector(pf);
6612 
6613 	/* restart the VSIs that were rebuilt and running before the reset */
6614 	i40e_pf_unquiesce_all_vsi(pf);
6615 
6616 	if (pf->num_alloc_vfs) {
6617 		for (v = 0; v < pf->num_alloc_vfs; v++)
6618 			i40e_reset_vf(&pf->vf[v], true);
6619 	}
6620 
6621 	/* tell the firmware that we're starting */
6622 	i40e_send_version(pf);
6623 
6624 end_core_reset:
6625 	clear_bit(__I40E_RESET_FAILED, &pf->state);
6626 clear_recovery:
6627 	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6628 }
6629 
6630 /**
6631  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
6632  * @pf: board private structure
6633  *
6634  * Close up the VFs and other things in prep for a Core Reset,
6635  * then get ready to rebuild the world.
6636  **/
6637 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6638 {
6639 	i40e_prep_for_reset(pf);
6640 	i40e_reset_and_rebuild(pf, false);
6641 }
6642 
6643 /**
6644  * i40e_handle_mdd_event
6645  * @pf: pointer to the PF structure
6646  *
6647  * Called from the MDD irq handler to identify possibly malicious VFs
6648  **/
6649 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6650 {
6651 	struct i40e_hw *hw = &pf->hw;
6652 	bool mdd_detected = false;
6653 	bool pf_mdd_detected = false;
6654 	struct i40e_vf *vf;
6655 	u32 reg;
6656 	int i;
6657 
6658 	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6659 		return;
6660 
6661 	/* find what triggered the MDD event */
6662 	reg = rd32(hw, I40E_GL_MDET_TX);
6663 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6664 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6665 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
6666 		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6667 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
6668 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6669 				I40E_GL_MDET_TX_EVENT_SHIFT;
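		/* the queue number in the register is absolute; subtract the
		 * PF's base queue to get a PF-relative index
		 */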
6670 		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6671 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
6672 				pf->hw.func_caps.base_queue;
6673 		if (netif_msg_tx_err(pf))
6674 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
6675 				 event, queue, pf_num, vf_num);
6676 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6677 		mdd_detected = true;
6678 	}
6679 	reg = rd32(hw, I40E_GL_MDET_RX);
6680 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6681 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6682 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
6683 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6684 				I40E_GL_MDET_RX_EVENT_SHIFT;
6685 		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6686 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
6687 				pf->hw.func_caps.base_queue;
6688 		if (netif_msg_rx_err(pf))
6689 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6690 				 event, queue, func);
6691 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6692 		mdd_detected = true;
6693 	}
6694 
6695 	if (mdd_detected) {
6696 		reg = rd32(hw, I40E_PF_MDET_TX);
6697 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6698 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6699 			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
6700 			pf_mdd_detected = true;
6701 		}
6702 		reg = rd32(hw, I40E_PF_MDET_RX);
6703 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6704 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6705 			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
6706 			pf_mdd_detected = true;
6707 		}
6708 		/* Queue belongs to the PF, initiate a reset */
6709 		if (pf_mdd_detected) {
6710 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6711 			i40e_service_event_schedule(pf);
6712 		}
6713 	}
6714 
6715 	/* see if one of the VFs needs its hand slapped */
6716 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6717 		vf = &(pf->vf[i]);
6718 		reg = rd32(hw, I40E_VP_MDET_TX(i));
6719 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6720 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6721 			vf->num_mdd_events++;
6722 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6723 				 i);
6724 		}
6725 
6726 		reg = rd32(hw, I40E_VP_MDET_RX(i));
6727 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6728 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6729 			vf->num_mdd_events++;
6730 			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6731 				 i);
6732 		}
6733 
6734 		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6735 			dev_info(&pf->pdev->dev,
6736 				 "Too many MDD events on VF %d, disabled\n", i);
6737 			dev_info(&pf->pdev->dev,
6738 				 "Use PF Control I/F to re-enable the VF\n");
6739 			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6740 		}
6741 	}
6742 
6743 	/* re-enable mdd interrupt cause */
6744 	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6745 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6746 	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6747 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6748 	i40e_flush(hw);
6749 }
6750 
6751 #ifdef CONFIG_I40E_VXLAN
6752 /**
6753  * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6754  * @pf: board private structure
6755  **/
6756 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6757 {
6758 	struct i40e_hw *hw = &pf->hw;
6759 	i40e_status ret;
6760 	__be16 port;
6761 	int i;
6762 
6763 	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6764 		return;
6765 
6766 	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6767 
6768 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6769 		if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
6770 			pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
6771 			port = pf->vxlan_ports[i];
6772 			if (port)
6773 				ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
6774 						     I40E_AQC_TUNNEL_TYPE_VXLAN,
6775 						     NULL, NULL);
6776 			else
6777 				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
6778 
6779 			if (ret) {
6780 				dev_info(&pf->pdev->dev,
6781 					 "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
6782 					 port ? "add" : "delete",
6783 					 ntohs(port), i,
6784 					 i40e_stat_str(&pf->hw, ret),
6785 					 i40e_aq_str(&pf->hw,
6786 						    pf->hw.aq.asq_last_status));
6787 				pf->vxlan_ports[i] = 0;
6788 			}
6789 		}
6790 	}
6791 }
6792 
6793 #endif
6794 /**
6795  * i40e_service_task - Run the driver's async subtasks
6796  * @work: pointer to work_struct containing our data
6797  **/
6798 static void i40e_service_task(struct work_struct *work)
6799 {
6800 	struct i40e_pf *pf = container_of(work,
6801 					  struct i40e_pf,
6802 					  service_task);
6803 	unsigned long start_time = jiffies;
6804 
6805 	/* don't bother with service tasks if a reset is in progress */
6806 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6807 		i40e_service_event_complete(pf);
6808 		return;
6809 	}
6810 
6811 	i40e_reset_subtask(pf);
6812 	i40e_handle_mdd_event(pf);
6813 	i40e_vc_process_vflr_event(pf);
6814 	i40e_watchdog_subtask(pf);
6815 	i40e_fdir_reinit_subtask(pf);
6816 	i40e_sync_filters_subtask(pf);
6817 #ifdef CONFIG_I40E_VXLAN
6818 	i40e_sync_vxlan_filters_subtask(pf);
6819 #endif
6820 	i40e_clean_adminq_subtask(pf);
6821 
6822 	i40e_service_event_complete(pf);
6823 
6824 	/* If the tasks have taken longer than one timer cycle or there
6825 	 * is more work to be done, reschedule the service task now
6826 	 * rather than wait for the timer to tick again.
6827 	 */
6828 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6829 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)		 ||
6830 	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)		 ||
6831 	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6832 		i40e_service_event_schedule(pf);
6833 }
6834 
6835 /**
6836  * i40e_service_timer - timer callback
6837  * @data: pointer to PF struct
6838  **/
6839 static void i40e_service_timer(unsigned long data)
6840 {
6841 	struct i40e_pf *pf = (struct i40e_pf *)data;
6842 
6843 	mod_timer(&pf->service_timer,
6844 		  round_jiffies(jiffies + pf->service_timer_period));
6845 	i40e_service_event_schedule(pf);
6846 }
6847 
6848 /**
6849  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6850  * @vsi: the VSI being configured
6851  **/
6852 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6853 {
6854 	struct i40e_pf *pf = vsi->back;
6855 
6856 	switch (vsi->type) {
6857 	case I40E_VSI_MAIN:
6858 		vsi->alloc_queue_pairs = pf->num_lan_qps;
6859 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6860 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6861 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6862 			vsi->num_q_vectors = pf->num_lan_msix;
6863 		else
6864 			vsi->num_q_vectors = 1;
6865 
6866 		break;
6867 
6868 	case I40E_VSI_FDIR:
6869 		vsi->alloc_queue_pairs = 1;
6870 		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6871 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6872 		vsi->num_q_vectors = 1;
6873 		break;
6874 
6875 	case I40E_VSI_VMDQ2:
6876 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6877 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6878 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6879 		vsi->num_q_vectors = pf->num_vmdq_msix;
6880 		break;
6881 
6882 	case I40E_VSI_SRIOV:
6883 		vsi->alloc_queue_pairs = pf->num_vf_qps;
6884 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6885 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6886 		break;
6887 
6888 #ifdef I40E_FCOE
6889 	case I40E_VSI_FCOE:
6890 		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6891 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6892 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6893 		vsi->num_q_vectors = pf->num_fcoe_msix;
6894 		break;
6895 
6896 #endif /* I40E_FCOE */
6897 	default:
6898 		WARN_ON(1);
6899 		return -ENODATA;
6900 	}
6901 
6902 	return 0;
6903 }
6904 
6905 /**
6906  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
6907  * @vsi: VSI pointer
6908  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6909  *
6910  * On error: returns error code (negative)
6911  * On success: returns 0
6912  **/
6913 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6914 {
6915 	int size;
6916 	int ret = 0;
6917 
6918 	/* allocate memory for both Tx and Rx ring pointers */
6919 	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6920 	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6921 	if (!vsi->tx_rings)
6922 		return -ENOMEM;
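	/* the Rx ring pointers live in the second half of the same allocation */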
6923 	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
6924 
6925 	if (alloc_qvectors) {
6926 		/* allocate memory for q_vector pointers */
6927 		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6928 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6929 		if (!vsi->q_vectors) {
6930 			ret = -ENOMEM;
6931 			goto err_vectors;
6932 		}
6933 	}
6934 	return ret;
6935 
6936 err_vectors:
6937 	kfree(vsi->tx_rings);
6938 	return ret;
6939 }
6940 
6941 /**
6942  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
6943  * @pf: board private structure
6944  * @type: type of VSI
6945  *
6946  * On error: returns error code (negative)
6947  * On success: returns vsi index in PF (positive)
6948  **/
6949 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
6950 {
6951 	int ret = -ENODEV;
6952 	struct i40e_vsi *vsi;
6953 	int vsi_idx;
6954 	int i;
6955 
6956 	/* Need to protect the allocation of the VSIs at the PF level */
6957 	mutex_lock(&pf->switch_mutex);
6958 
6959 	/* VSI list may be fragmented if VSI creation/destruction has
6960 	 * been happening.  We can afford to do a quick scan to look
6961 	 * for any free VSIs in the list.
6962 	 *
6963 	 * find next empty vsi slot, looping back around if necessary
6964 	 */
6965 	i = pf->next_vsi;
6966 	while (i < pf->num_alloc_vsi && pf->vsi[i])
6967 		i++;
6968 	if (i >= pf->num_alloc_vsi) {
6969 		i = 0;
6970 		while (i < pf->next_vsi && pf->vsi[i])
6971 			i++;
6972 	}
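	/* illustrative example: with num_alloc_vsi = 8, next_vsi = 5 and
	 * slots 5..7 occupied, the first loop runs off the end and the
	 * wrap-around pass above re-checks slots 0..4 for a free entry
	 */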
6973 
6974 	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
6975 		vsi_idx = i;             /* Found one! */
6976 	} else {
6977 		ret = -ENODEV;
6978 		goto unlock_pf;  /* out of VSI slots! */
6979 	}
6980 	pf->next_vsi = ++i;
6981 
6982 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
6983 	if (!vsi) {
6984 		ret = -ENOMEM;
6985 		goto unlock_pf;
6986 	}
6987 	vsi->type = type;
6988 	vsi->back = pf;
6989 	set_bit(__I40E_DOWN, &vsi->state);
6990 	vsi->flags = 0;
6991 	vsi->idx = vsi_idx;
6992 	vsi->rx_itr_setting = pf->rx_itr_default;
6993 	vsi->tx_itr_setting = pf->tx_itr_default;
6994 	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
6995 				pf->rss_table_size : 64;
6996 	vsi->netdev_registered = false;
6997 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
6998 	INIT_LIST_HEAD(&vsi->mac_filter_list);
6999 	vsi->irqs_ready = false;
7000 
7001 	ret = i40e_set_num_rings_in_vsi(vsi);
7002 	if (ret)
7003 		goto err_rings;
7004 
7005 	ret = i40e_vsi_alloc_arrays(vsi, true);
7006 	if (ret)
7007 		goto err_rings;
7008 
7009 	/* Setup default MSIX irq handler for VSI */
7010 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7011 
7012 	pf->vsi[vsi_idx] = vsi;
7013 	ret = vsi_idx;
7014 	goto unlock_pf;
7015 
7016 err_rings:
7017 	pf->next_vsi = i - 1;
7018 	kfree(vsi);
7019 unlock_pf:
7020 	mutex_unlock(&pf->switch_mutex);
7021 	return ret;
7022 }
7023 
7024 /**
7025  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7026  * @vsi: VSI pointer
7027  * @free_qvectors: a bool to specify if q_vectors need to be freed.
7031  **/
7032 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7033 {
7034 	/* free the ring and vector containers */
7035 	if (free_qvectors) {
7036 		kfree(vsi->q_vectors);
7037 		vsi->q_vectors = NULL;
7038 	}
7039 	kfree(vsi->tx_rings);
7040 	vsi->tx_rings = NULL;
7041 	vsi->rx_rings = NULL;
7042 }
7043 
7044 /**
7045  * i40e_vsi_clear - Deallocate the VSI provided
7046  * @vsi: the VSI being un-configured
7047  **/
7048 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7049 {
7050 	struct i40e_pf *pf;
7051 
7052 	if (!vsi)
7053 		return 0;
7054 
7055 	if (!vsi->back)
7056 		goto free_vsi;
7057 	pf = vsi->back;
7058 
7059 	mutex_lock(&pf->switch_mutex);
7060 	if (!pf->vsi[vsi->idx]) {
7061 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7062 			vsi->idx, vsi->idx, vsi, vsi->type);
7063 		goto unlock_vsi;
7064 	}
7065 
7066 	if (pf->vsi[vsi->idx] != vsi) {
7067 		dev_err(&pf->pdev->dev,
7068 			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7069 			pf->vsi[vsi->idx]->idx,
7070 			pf->vsi[vsi->idx],
7071 			pf->vsi[vsi->idx]->type,
7072 			vsi->idx, vsi, vsi->type);
7073 		goto unlock_vsi;
7074 	}
7075 
7076 	/* updates the PF for this cleared vsi */
7077 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7078 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7079 
7080 	i40e_vsi_free_arrays(vsi, true);
7081 
7082 	pf->vsi[vsi->idx] = NULL;
7083 	if (vsi->idx < pf->next_vsi)
7084 		pf->next_vsi = vsi->idx;
7085 
7086 unlock_vsi:
7087 	mutex_unlock(&pf->switch_mutex);
7088 free_vsi:
7089 	kfree(vsi);
7090 
7091 	return 0;
7092 }
7093 
7094 /**
7095  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7096  * @vsi: the VSI being cleaned
7097  **/
7098 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7099 {
7100 	int i;
7101 
7102 	if (vsi->tx_rings && vsi->tx_rings[0]) {
7103 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7104 			kfree_rcu(vsi->tx_rings[i], rcu);
7105 			vsi->tx_rings[i] = NULL;
7106 			vsi->rx_rings[i] = NULL;
7107 		}
7108 	}
7109 }
7110 
7111 /**
7112  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7113  * @vsi: the VSI being configured
7114  **/
7115 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7116 {
7117 	struct i40e_ring *tx_ring, *rx_ring;
7118 	struct i40e_pf *pf = vsi->back;
7119 	int i;
7120 
7121 	/* Set basic values in the rings to be used later during open() */
7122 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7123 		/* allocate space for both Tx and Rx in one shot */
7124 		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7125 		if (!tx_ring)
7126 			goto err_out;
7127 
7128 		tx_ring->queue_index = i;
7129 		tx_ring->reg_idx = vsi->base_queue + i;
7130 		tx_ring->ring_active = false;
7131 		tx_ring->vsi = vsi;
7132 		tx_ring->netdev = vsi->netdev;
7133 		tx_ring->dev = &pf->pdev->dev;
7134 		tx_ring->count = vsi->num_desc;
7135 		tx_ring->size = 0;
7136 		tx_ring->dcb_tc = 0;
7137 		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7138 			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7139 		if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
7140 			tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
7141 		vsi->tx_rings[i] = tx_ring;
7142 
7143 		rx_ring = &tx_ring[1];
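		/* the Rx ring is the second element of the two-ring
		 * allocation above, which is why i40e_vsi_clear_rings()
		 * only needs to free tx_rings[i]
		 */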
7144 		rx_ring->queue_index = i;
7145 		rx_ring->reg_idx = vsi->base_queue + i;
7146 		rx_ring->ring_active = false;
7147 		rx_ring->vsi = vsi;
7148 		rx_ring->netdev = vsi->netdev;
7149 		rx_ring->dev = &pf->pdev->dev;
7150 		rx_ring->count = vsi->num_desc;
7151 		rx_ring->size = 0;
7152 		rx_ring->dcb_tc = 0;
7153 		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7154 			set_ring_16byte_desc_enabled(rx_ring);
7155 		else
7156 			clear_ring_16byte_desc_enabled(rx_ring);
7157 		vsi->rx_rings[i] = rx_ring;
7158 	}
7159 
7160 	return 0;
7161 
7162 err_out:
7163 	i40e_vsi_clear_rings(vsi);
7164 	return -ENOMEM;
7165 }
7166 
7167 /**
7168  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7169  * @pf: board private structure
7170  * @vectors: the number of MSI-X vectors to request
7171  *
7172  * Returns the number of vectors reserved, or error
7173  **/
7174 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7175 {
7176 	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7177 					I40E_MIN_MSIX, vectors);
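	/* pci_enable_msix_range() returns the number of vectors actually
	 * granted (at least I40E_MIN_MSIX) or a negative errno
	 */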
7178 	if (vectors < 0) {
7179 		dev_info(&pf->pdev->dev,
7180 			 "MSI-X vector reservation failed: %d\n", vectors);
7181 		vectors = 0;
7182 	}
7183 
7184 	return vectors;
7185 }
7186 
7187 /**
7188  * i40e_init_msix - Setup the MSIX capability
7189  * @pf: board private structure
7190  *
7191  * Work with the OS to set up the MSIX vectors needed.
7192  *
7193  * Returns the number of vectors reserved or negative on failure
7194  **/
7195 static int i40e_init_msix(struct i40e_pf *pf)
7196 {
7197 	struct i40e_hw *hw = &pf->hw;
7198 	int vectors_left;
7199 	int v_budget, i;
7200 	int v_actual;
7201 
7202 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7203 		return -ENODEV;
7204 
7205 	/* The number of vectors we'll request will be comprised of:
7206 	 *   - Add 1 for "other" cause for Admin Queue events, etc.
7207 	 *   - The number of LAN queue pairs
7208 	 *	- Queues being used for RSS.
7209 	 *		We don't need as many as max_rss_size vectors.
7210 	 *		use rss_size instead in the calculation since that
7211 	 *		is governed by number of cpus in the system.
7212 	 *	- assumes symmetric Tx/Rx pairing
7213 	 *   - The number of VMDq pairs
7214 #ifdef I40E_FCOE
7215 	 *   - The number of FCOE qps.
7216 #endif
7217 	 * Once we count this up, try the request.
7218 	 *
7219 	 * If we can't get what we want, we'll simplify to nearly nothing
7220 	 * and try again.  If that still fails, we punt.
7221 	 */
7222 	vectors_left = hw->func_caps.num_msix_vectors;
7223 	v_budget = 0;
7224 
7225 	/* reserve one vector for miscellaneous handler */
7226 	if (vectors_left) {
7227 		v_budget++;
7228 		vectors_left--;
7229 	}
7230 
7231 	/* reserve vectors for the main PF traffic queues */
7232 	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7233 	vectors_left -= pf->num_lan_msix;
7234 	v_budget += pf->num_lan_msix;
7235 
7236 	/* reserve one vector for sideband flow director */
7237 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7238 		if (vectors_left) {
7239 			v_budget++;
7240 			vectors_left--;
7241 		} else {
7242 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7243 		}
7244 	}
7245 
7246 #ifdef I40E_FCOE
7247 	/* can we reserve enough for FCoE? */
7248 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7249 		if (!vectors_left)
7250 			pf->num_fcoe_msix = 0;
7251 		else if (vectors_left >= pf->num_fcoe_qps)
7252 			pf->num_fcoe_msix = pf->num_fcoe_qps;
7253 		else
7254 			pf->num_fcoe_msix = 1;
7255 		v_budget += pf->num_fcoe_msix;
7256 		vectors_left -= pf->num_fcoe_msix;
7257 	}
7258 
7259 #endif
7260 	/* any vectors left over go for VMDq support */
7261 	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7262 		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7263 		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7264 
7265 		/* if we're short on vectors for what's desired, we limit
7266 		 * the queues per vmdq.  If this is still more than are
7267 		 * available, the user will need to change the number of
7268 		 * queues/vectors used by the PF later with the ethtool
7269 		 * channels command
7270 		 */
7271 		if (vmdq_vecs < vmdq_vecs_wanted)
7272 			pf->num_vmdq_qps = 1;
7273 		pf->num_vmdq_msix = pf->num_vmdq_qps;
7274 
7275 		v_budget += vmdq_vecs;
7276 		vectors_left -= vmdq_vecs;
7277 	}
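	/* illustrative budget, not a hardware limit: with 16 online CPUs,
	 * flow director sideband enabled and 8 VMDq VSIs of 2 queues each,
	 * the request is 1 (misc) + 16 (LAN) + 1 (FD SB) + 16 (VMDq) = 34
	 * vectors, assuming the function exposes at least that many
	 */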
7278 
7279 	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7280 				   GFP_KERNEL);
7281 	if (!pf->msix_entries)
7282 		return -ENOMEM;
7283 
7284 	for (i = 0; i < v_budget; i++)
7285 		pf->msix_entries[i].entry = i;
7286 	v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7287 
7288 	if (v_actual != v_budget) {
7289 		/* If we have limited resources, we will start with no vectors
7290 		 * for the special features and then allocate vectors to some
7291 		 * of these features based on the policy and at the end disable
7292 		 * the features that did not get any vectors.
7293 		 */
7294 #ifdef I40E_FCOE
7295 		pf->num_fcoe_qps = 0;
7296 		pf->num_fcoe_msix = 0;
7297 #endif
7298 		pf->num_vmdq_msix = 0;
7299 	}
7300 
7301 	if (v_actual < I40E_MIN_MSIX) {
7302 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7303 		kfree(pf->msix_entries);
7304 		pf->msix_entries = NULL;
7305 		return -ENODEV;
7306 
7307 	} else if (v_actual == I40E_MIN_MSIX) {
7308 		/* Adjust for minimal MSIX use */
7309 		pf->num_vmdq_vsis = 0;
7310 		pf->num_vmdq_qps = 0;
7311 		pf->num_lan_qps = 1;
7312 		pf->num_lan_msix = 1;
7313 
7314 	} else if (v_actual != v_budget) {
7315 		int vec;
7316 
7317 		/* reserve the misc vector */
7318 		vec = v_actual - 1;
7319 
7320 		/* Scale vector usage down */
7321 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
7322 		pf->num_vmdq_vsis = 1;
7323 		pf->num_vmdq_qps = 1;
7324 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7325 
7326 		/* partition out the remaining vectors */
7327 		switch (vec) {
7328 		case 2:
7329 			pf->num_lan_msix = 1;
7330 			break;
7331 		case 3:
7332 #ifdef I40E_FCOE
7333 			/* give one vector to FCoE */
7334 			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7335 				pf->num_lan_msix = 1;
7336 				pf->num_fcoe_msix = 1;
7337 			}
7338 #else
7339 			pf->num_lan_msix = 2;
7340 #endif
7341 			break;
7342 		default:
7343 #ifdef I40E_FCOE
7344 			/* give one vector to FCoE */
7345 			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7346 				pf->num_fcoe_msix = 1;
7347 				vec--;
7348 			}
7349 #endif
7350 			/* give the rest to the PF */
7351 			pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
7352 			break;
7353 		}
7354 	}
7355 
7356 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7357 	    (pf->num_vmdq_msix == 0)) {
7358 		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7359 		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7360 	}
7361 #ifdef I40E_FCOE
7362 
7363 	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7364 		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7365 		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7366 	}
7367 #endif
7368 	return v_actual;
7369 }
7370 
7371 /**
7372  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7373  * @vsi: the VSI being configured
7374  * @v_idx: index of the vector in the vsi struct
7375  *
7376  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7377  **/
7378 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7379 {
7380 	struct i40e_q_vector *q_vector;
7381 
7382 	/* allocate q_vector */
7383 	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7384 	if (!q_vector)
7385 		return -ENOMEM;
7386 
7387 	q_vector->vsi = vsi;
7388 	q_vector->v_idx = v_idx;
7389 	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7390 	if (vsi->netdev)
7391 		netif_napi_add(vsi->netdev, &q_vector->napi,
7392 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
7393 
7394 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
7395 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
7396 
7397 	/* tie q_vector and vsi together */
7398 	vsi->q_vectors[v_idx] = q_vector;
7399 
7400 	return 0;
7401 }
7402 
7403 /**
7404  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7405  * @vsi: the VSI being configured
7406  *
7407  * We allocate one q_vector per queue interrupt.  If allocation fails we
7408  * return -ENOMEM.
7409  **/
7410 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7411 {
7412 	struct i40e_pf *pf = vsi->back;
7413 	int v_idx, num_q_vectors;
7414 	int err;
7415 
7416 	/* if not MSIX, give the one vector only to the LAN VSI */
7417 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7418 		num_q_vectors = vsi->num_q_vectors;
7419 	else if (vsi == pf->vsi[pf->lan_vsi])
7420 		num_q_vectors = 1;
7421 	else
7422 		return -EINVAL;
7423 
7424 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7425 		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7426 		if (err)
7427 			goto err_out;
7428 	}
7429 
7430 	return 0;
7431 
7432 err_out:
7433 	while (v_idx--)
7434 		i40e_free_q_vector(vsi, v_idx);
7435 
7436 	return err;
7437 }
7438 
7439 /**
7440  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7441  * @pf: board private structure to initialize
7442  **/
7443 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7444 {
7445 	int vectors = 0;
7446 	ssize_t size;
7447 
7448 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7449 		vectors = i40e_init_msix(pf);
7450 		if (vectors < 0) {
7451 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
7452 #ifdef I40E_FCOE
7453 				       I40E_FLAG_FCOE_ENABLED	|
7454 #endif
7455 				       I40E_FLAG_RSS_ENABLED	|
7456 				       I40E_FLAG_DCB_CAPABLE	|
7457 				       I40E_FLAG_SRIOV_ENABLED	|
7458 				       I40E_FLAG_FD_SB_ENABLED	|
7459 				       I40E_FLAG_FD_ATR_ENABLED	|
7460 				       I40E_FLAG_VMDQ_ENABLED);
7461 
7462 			/* rework the queue expectations without MSIX */
7463 			i40e_determine_queue_usage(pf);
7464 		}
7465 	}
7466 
7467 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7468 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7469 		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7470 		vectors = pci_enable_msi(pf->pdev);
7471 		if (vectors < 0) {
7472 			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7473 				 vectors);
7474 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7475 		}
7476 		vectors = 1;  /* one MSI or Legacy vector */
7477 	}
7478 
7479 	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7480 		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7481 
7482 	/* set up vector assignment tracking */
7483 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7484 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
7485 	if (!pf->irq_pile) {
7486 		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7487 		return -ENOMEM;
7488 	}
7489 	pf->irq_pile->num_entries = vectors;
7490 	pf->irq_pile->search_hint = 0;
7491 
7492 	/* track first vector for misc interrupts, ignore return */
7493 	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7494 
7495 	return 0;
7496 }
7497 
7498 /**
7499  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7500  * @pf: board private structure
7501  *
7502  * This sets up the handler for MSIX 0, which is used to manage the
7503  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
7504  * when in MSI or Legacy interrupt mode.
7505  **/
7506 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7507 {
7508 	struct i40e_hw *hw = &pf->hw;
7509 	int err = 0;
7510 
7511 	/* Only request the irq if this is the first time through, and
7512 	 * not when we're rebuilding after a Reset
7513 	 */
7514 	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7515 		err = request_irq(pf->msix_entries[0].vector,
7516 				  i40e_intr, 0, pf->int_name, pf);
7517 		if (err) {
7518 			dev_info(&pf->pdev->dev,
7519 				 "request_irq for %s failed: %d\n",
7520 				 pf->int_name, err);
7521 			return -EFAULT;
7522 		}
7523 	}
7524 
7525 	i40e_enable_misc_int_causes(pf);
7526 
7527 	/* associate no queues to the misc vector */
7528 	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7529 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7530 
7531 	i40e_flush(hw);
7532 
7533 	i40e_irq_dynamic_enable_icr0(pf);
7534 
7535 	return err;
7536 }
7537 
7538 /**
7539  * i40e_config_rss_aq - Prepare for RSS using AQ commands
7540  * @vsi: vsi structure
7541  * @seed: RSS hash seed
7542  **/
7543 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
7544 {
7545 	struct i40e_aqc_get_set_rss_key_data rss_key;
7546 	struct i40e_pf *pf = vsi->back;
7547 	struct i40e_hw *hw = &pf->hw;
7548 	bool pf_lut = false;
7549 	u8 *rss_lut;
7550 	int ret, i;
7551 
7552 	memset(&rss_key, 0, sizeof(rss_key));
7553 	memcpy(&rss_key, seed, sizeof(rss_key));
7554 
7555 	rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
7556 	if (!rss_lut)
7557 		return -ENOMEM;
7558 
7559 	/* Populate the LUT with the available queues in round-robin fashion */
7560 	for (i = 0; i < vsi->rss_table_size; i++)
7561 		rss_lut[i] = i % vsi->rss_size;
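	/* e.g. with rss_size = 4 the LUT bytes read 0,1,2,3,0,1,2,3,... so
	 * hash buckets are spread evenly across the active queues
	 * (illustrative value)
	 */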
7562 
7563 	ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
7564 	if (ret) {
7565 		dev_info(&pf->pdev->dev,
7566 			 "Cannot set RSS key, err %s aq_err %s\n",
7567 			 i40e_stat_str(&pf->hw, ret),
7568 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		kfree(rss_lut);	/* don't leak the LUT buffer on the error path */
7569 		return ret;
7570 	}
7571 
7572 	if (vsi->type == I40E_VSI_MAIN)
7573 		pf_lut = true;
7574 
7575 	ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
7576 				  vsi->rss_table_size);
7577 	if (ret)
7578 		dev_info(&pf->pdev->dev,
7579 			 "Cannot set RSS lut, err %s aq_err %s\n",
7580 			 i40e_stat_str(&pf->hw, ret),
7581 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7582 
	kfree(rss_lut);
7583 	return ret;
7584 }
7585 
7586 /**
7587  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7588  * @vsi: VSI structure
7589  **/
7590 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7591 {
7592 	u8 seed[I40E_HKEY_ARRAY_SIZE];
7593 	struct i40e_pf *pf = vsi->back;
7594 
7595 	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7596 	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7597 
7598 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7599 		return i40e_config_rss_aq(vsi, seed);
7600 
7601 	return 0;
7602 }
7603 
7604 /**
7605  * i40e_config_rss_reg - Prepare for RSS if used
7606  * @pf: board private structure
7607  * @seed: RSS hash seed
7608  **/
7609 static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
7610 {
7611 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7612 	struct i40e_hw *hw = &pf->hw;
7613 	u32 *seed_dw = (u32 *)seed;
7614 	u32 current_queue = 0;
7615 	u32 lut = 0;
7616 	int i, j;
7617 
7618 	/* Fill out hash function seed */
7619 	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7620 		wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
7621 
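	/* each 32-bit HLUT register packs four 8-bit queue indices, one per
	 * byte; the queue number wraps whenever it reaches vsi->rss_size
	 */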
7622 	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
7623 		lut = 0;
7624 		for (j = 0; j < 4; j++) {
7625 			if (current_queue == vsi->rss_size)
7626 				current_queue = 0;
7627 			lut |= ((current_queue) << (8 * j));
7628 			current_queue++;
7629 		}
7630 		wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
7631 	}
7632 	i40e_flush(hw);
7633 
7634 	return 0;
7635 }
7636 
7637 /**
7638  * i40e_config_rss - Prepare for RSS if used
7639  * @pf: board private structure
7640  **/
7641 static int i40e_config_rss(struct i40e_pf *pf)
7642 {
7643 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7644 	u8 seed[I40E_HKEY_ARRAY_SIZE];
7645 	struct i40e_hw *hw = &pf->hw;
7646 	u32 reg_val;
7647 	u64 hena;
7648 
7649 	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7650 
7651 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
7652 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
7653 		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
7654 	hena |= i40e_pf_get_default_rss_hena(pf);
7655 
7656 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
7657 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
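	/* the hash-enable mask is 64 bits wide, so it is split across the
	 * two 32-bit HENA registers written above
	 */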
7658 
7659 	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7660 
7661 	/* Determine the RSS table size based on the hardware capabilities */
7662 	reg_val = rd32(hw, I40E_PFQF_CTL_0);
7663 	reg_val = (pf->rss_table_size == 512) ?
7664 			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
7665 			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
7666 	wr32(hw, I40E_PFQF_CTL_0, reg_val);
7667 
7668 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7669 		return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
7670 	else
7671 		return i40e_config_rss_reg(pf, seed);
7672 }
7673 
7674 /**
7675  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7676  * @pf: board private structure
7677  * @queue_count: the requested queue count for rss.
7678  *
7679  * returns 0 if rss is not enabled; if enabled, returns the final rss queue
7680  * count, which may differ from the requested queue count.
7681  **/
7682 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7683 {
7684 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7685 	int new_rss_size;
7686 
7687 	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7688 		return 0;
7689 
7690 	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
7691 
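	/* a different queue count means the VSIs and their rings must be
	 * torn down and rebuilt, hence the prep/reset/rebuild sequence below
	 */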
7692 	if (queue_count != vsi->num_queue_pairs) {
7693 		vsi->req_queue_pairs = queue_count;
7694 		i40e_prep_for_reset(pf);
7695 
7696 		pf->rss_size = new_rss_size;
7697 
7698 		i40e_reset_and_rebuild(pf, true);
7699 		i40e_config_rss(pf);
7700 	}
7701 	dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
7702 	return pf->rss_size;
7703 }
7704 
7705 /**
7706  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
7707  * @pf: board private structure
7708  **/
7709 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
7710 {
7711 	i40e_status status;
7712 	bool min_valid, max_valid;
7713 	u32 max_bw, min_bw;
7714 
7715 	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
7716 					   &min_valid, &max_valid);
7717 
7718 	if (!status) {
7719 		if (min_valid)
7720 			pf->npar_min_bw = min_bw;
7721 		if (max_valid)
7722 			pf->npar_max_bw = max_bw;
7723 	}
7724 
7725 	return status;
7726 }
7727 
7728 /**
7729  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
7730  * @pf: board private structure
7731  **/
7732 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
7733 {
7734 	struct i40e_aqc_configure_partition_bw_data bw_data;
7735 	i40e_status status;
7736 
7737 	/* Set the valid bit for this PF */
7738 	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
7739 	bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
7740 	bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
7741 
7742 	/* Set the new bandwidths */
7743 	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
7744 
7745 	return status;
7746 }
7747 
7748 /**
7749  * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
7750  * @pf: board private structure
7751  **/
7752 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
7753 {
7754 	/* Commit temporary BW setting to permanent NVM image */
7755 	enum i40e_admin_queue_err last_aq_status;
7756 	i40e_status ret;
7757 	u16 nvm_word;
7758 
7759 	if (pf->hw.partition_id != 1) {
7760 		dev_info(&pf->pdev->dev,
7761 			 "Commit BW only works on partition 1! This is partition %d",
7762 			 pf->hw.partition_id);
7763 		ret = I40E_NOT_SUPPORTED;
7764 		goto bw_commit_out;
7765 	}
7766 
7767 	/* Acquire NVM for read access */
7768 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
7769 	last_aq_status = pf->hw.aq.asq_last_status;
7770 	if (ret) {
7771 		dev_info(&pf->pdev->dev,
7772 			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
7773 			 i40e_stat_str(&pf->hw, ret),
7774 			 i40e_aq_str(&pf->hw, last_aq_status));
7775 		goto bw_commit_out;
7776 	}
7777 
7778 	/* Read word 0x10 of NVM - SW compatibility word 1 */
7779 	ret = i40e_aq_read_nvm(&pf->hw,
7780 			       I40E_SR_NVM_CONTROL_WORD,
7781 			       0x10, sizeof(nvm_word), &nvm_word,
7782 			       false, NULL);
7783 	/* Save off last admin queue command status before releasing
7784 	 * the NVM
7785 	 */
7786 	last_aq_status = pf->hw.aq.asq_last_status;
7787 	i40e_release_nvm(&pf->hw);
7788 	if (ret) {
7789 		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
7790 			 i40e_stat_str(&pf->hw, ret),
7791 			 i40e_aq_str(&pf->hw, last_aq_status));
7792 		goto bw_commit_out;
7793 	}
7794 
7795 	/* Wait a bit for NVM release to complete */
7796 	msleep(50);
7797 
7798 	/* Acquire NVM for write access */
7799 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
7800 	last_aq_status = pf->hw.aq.asq_last_status;
7801 	if (ret) {
7802 		dev_info(&pf->pdev->dev,
7803 			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
7804 			 i40e_stat_str(&pf->hw, ret),
7805 			 i40e_aq_str(&pf->hw, last_aq_status));
7806 		goto bw_commit_out;
7807 	}
7808 	/* Write it back out unchanged to initiate update NVM,
7809 	 * which will force a write of the shadow (alt) RAM to
7810 	 * the NVM - thus storing the bandwidth values permanently.
7811 	 */
7812 	ret = i40e_aq_update_nvm(&pf->hw,
7813 				 I40E_SR_NVM_CONTROL_WORD,
7814 				 0x10, sizeof(nvm_word),
7815 				 &nvm_word, true, NULL);
7816 	/* Save off last admin queue command status before releasing
7817 	 * the NVM
7818 	 */
7819 	last_aq_status = pf->hw.aq.asq_last_status;
7820 	i40e_release_nvm(&pf->hw);
7821 	if (ret)
7822 		dev_info(&pf->pdev->dev,
7823 			 "BW settings NOT SAVED, err %s aq_err %s\n",
7824 			 i40e_stat_str(&pf->hw, ret),
7825 			 i40e_aq_str(&pf->hw, last_aq_status));
7826 bw_commit_out:
7827 
7828 	return ret;
7829 }
7830 
7831 /**
7832  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
7833  * @pf: board private structure to initialize
7834  *
7835  * i40e_sw_init initializes the Adapter private data structure.
7836  * Fields are initialized based on PCI device information and
7837  * OS network device settings (MTU size).
7838  **/
7839 static int i40e_sw_init(struct i40e_pf *pf)
7840 {
7841 	int err = 0;
7842 	int size;
7843 
7844 	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
7845 				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
7846 	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
7847 	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
7848 		if (I40E_DEBUG_USER & debug)
7849 			pf->hw.debug_mask = debug;
7850 		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
7851 						I40E_DEFAULT_MSG_ENABLE);
7852 	}
7853 
7854 	/* Set default capability flags */
7855 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
7856 		    I40E_FLAG_MSI_ENABLED     |
7857 		    I40E_FLAG_MSIX_ENABLED;
7858 
7859 	if (iommu_present(&pci_bus_type))
7860 		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
7861 	else
7862 		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
7863 
7864 	/* Set default ITR */
7865 	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
7866 	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
7867 
7868 	/* Depending on PF configurations, it is possible that the RSS
7869 	 * maximum might end up larger than the available queues
7870 	 */
7871 	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
7872 	pf->rss_size = 1;
7873 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
7874 	pf->rss_size_max = min_t(int, pf->rss_size_max,
7875 				 pf->hw.func_caps.num_tx_qp);
7876 	if (pf->hw.func_caps.rss) {
7877 		pf->flags |= I40E_FLAG_RSS_ENABLED;
7878 		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
7879 	}
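	/* illustrative sizing: an rss_table_entry_width of 9 gives a raw
	 * maximum of 512, which is then clamped by num_tx_qp above and by
	 * the number of online CPUs when RSS is actually enabled
	 */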
7880 
7881 	/* MFP mode enabled */
7882 	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
7883 		pf->flags |= I40E_FLAG_MFP_ENABLED;
7884 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
7885 		if (i40e_get_npar_bw_setting(pf))
7886 			dev_warn(&pf->pdev->dev,
7887 				 "Could not get NPAR bw settings\n");
7888 		else
7889 			dev_info(&pf->pdev->dev,
7890 				 "Min BW = %8.8x, Max BW = %8.8x\n",
7891 				 pf->npar_min_bw, pf->npar_max_bw);
7892 	}
7893 
7894 	/* FW/NVM is not yet fixed in this regard */
7895 	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
7896 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
7897 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7898 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
7899 		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
7900 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7901 		} else {
7902 			dev_info(&pf->pdev->dev,
7903 				 "Flow Director Sideband mode Disabled in MFP mode\n");
7904 		}
7905 		pf->fdir_pf_filter_count =
7906 				 pf->hw.func_caps.fd_filters_guaranteed;
7907 		pf->hw.fdir_shared_filter_count =
7908 				 pf->hw.func_caps.fd_filters_best_effort;
7909 	}
7910 
7911 	if (pf->hw.func_caps.vmdq) {
7912 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
7913 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
7914 	}
7915 
7916 #ifdef I40E_FCOE
7917 	err = i40e_init_pf_fcoe(pf);
7918 	if (err)
7919 		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
7920 
7921 #endif /* I40E_FCOE */
7922 #ifdef CONFIG_PCI_IOV
7923 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
7924 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7925 		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7926 		pf->num_req_vfs = min_t(int,
7927 					pf->hw.func_caps.num_vfs,
7928 					I40E_MAX_VF_COUNT);
7929 	}
7930 #endif /* CONFIG_PCI_IOV */
7931 	if (pf->hw.mac.type == I40E_MAC_X722) {
7932 		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
7933 			     I40E_FLAG_128_QP_RSS_CAPABLE |
7934 			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
7935 			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
7936 			     I40E_FLAG_WB_ON_ITR_CAPABLE |
7937 			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
7938 	}
7939 	pf->eeprom_version = 0xDEAD;
7940 	pf->lan_veb = I40E_NO_VEB;
7941 	pf->lan_vsi = I40E_NO_VSI;
7942 
7943 	/* set up queue assignment tracking */
7944 	size = sizeof(struct i40e_lump_tracking)
7945 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
7946 	pf->qp_pile = kzalloc(size, GFP_KERNEL);
7947 	if (!pf->qp_pile) {
7948 		err = -ENOMEM;
7949 		goto sw_init_done;
7950 	}
7951 	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
7952 	pf->qp_pile->search_hint = 0;
7953 
7954 	pf->tx_timeout_recovery_level = 1;
7955 
7956 	mutex_init(&pf->switch_mutex);
7957 
7958 	/* If NPAR is enabled nudge the Tx scheduler */
7959 	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
7960 		i40e_set_npar_bw_setting(pf);
7961 
7962 sw_init_done:
7963 	return err;
7964 }
7965 
7966 /**
7967  * i40e_set_ntuple - set the ntuple feature flag and take action
7968  * @pf: board private structure to initialize
7969  * @features: the feature set that the stack is suggesting
7970  *
7971  * returns a bool to indicate if reset needs to happen
7972  **/
7973 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7974 {
7975 	bool need_reset = false;
7976 
7977 	/* Check if Flow Director n-tuple support was enabled or disabled.  If
7978 	 * the state changed, we need to reset.
7979 	 */
7980 	if (features & NETIF_F_NTUPLE) {
7981 		/* Enable filters and mark for reset */
7982 		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7983 			need_reset = true;
7984 		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7985 	} else {
7986 		/* turn off filters, mark for reset and clear SW filter list */
7987 		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7988 			need_reset = true;
7989 			i40e_fdir_filter_exit(pf);
7990 		}
7991 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7992 		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
7993 		/* reset fd counters */
7994 		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7995 		pf->fdir_pf_active_filters = 0;
7996 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7997 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
7998 			dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
7999 		/* if ATR was auto disabled it can be re-enabled. */
8000 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8001 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
8002 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8003 	}
8004 	return need_reset;
8005 }
8006 
8007 /**
8008  * i40e_set_features - set the netdev feature flags
8009  * @netdev: ptr to the netdev being adjusted
8010  * @features: the feature set that the stack is suggesting
8011  **/
8012 static int i40e_set_features(struct net_device *netdev,
8013 			     netdev_features_t features)
8014 {
8015 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8016 	struct i40e_vsi *vsi = np->vsi;
8017 	struct i40e_pf *pf = vsi->back;
8018 	bool need_reset;
8019 
8020 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
8021 		i40e_vlan_stripping_enable(vsi);
8022 	else
8023 		i40e_vlan_stripping_disable(vsi);
8024 
8025 	need_reset = i40e_set_ntuple(pf, features);
8026 
8027 	if (need_reset)
8028 		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8029 
8030 	return 0;
8031 }
8032 
8033 #ifdef CONFIG_I40E_VXLAN
8034 /**
8035  * i40e_get_vxlan_port_idx - Look up a UDP port that may be offloaded for Rx
8036  * @pf: board private structure
8037  * @port: The UDP port to look up
8038  *
8039  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8040  **/
8041 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
8042 {
8043 	u8 i;
8044 
8045 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8046 		if (pf->vxlan_ports[i] == port)
8047 			return i;
8048 	}
8049 
8050 	return i;
8051 }
8052 
8053 /**
8054  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
8055  * @netdev: This physical port's netdev
8056  * @sa_family: Socket Family that VXLAN is notifying us about
8057  * @port: New UDP port number that VXLAN started listening to
8058  **/
8059 static void i40e_add_vxlan_port(struct net_device *netdev,
8060 				sa_family_t sa_family, __be16 port)
8061 {
8062 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8063 	struct i40e_vsi *vsi = np->vsi;
8064 	struct i40e_pf *pf = vsi->back;
8065 	u8 next_idx;
8066 	u8 idx;
8067 
8068 	if (sa_family == AF_INET6)
8069 		return;
8070 
8071 	idx = i40e_get_vxlan_port_idx(pf, port);
8072 
8073 	/* Check if port already exists */
8074 	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8075 		netdev_info(netdev, "vxlan port %d already offloaded\n",
8076 			    ntohs(port));
8077 		return;
8078 	}
8079 
8080 	/* Now check if there is space to add the new port */
8081 	next_idx = i40e_get_vxlan_port_idx(pf, 0);
8082 
8083 	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8084 		netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
8085 			    ntohs(port));
8086 		return;
8087 	}
8088 
8089 	/* New port: add it and mark its index in the bitmap */
8090 	pf->vxlan_ports[next_idx] = port;
8091 	pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
8092 	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
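	/* the hardware UDP tunnel table is not written here; the pending
	 * bitmap and the sync flag tell the service task to push the new
	 * port out later
	 */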
8093 }
8094 
8095 /**
8096  * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
8097  * @netdev: This physical port's netdev
8098  * @sa_family: Socket Family that VXLAN is notifying us about
8099  * @port: UDP port number that VXLAN stopped listening to
8100  **/
8101 static void i40e_del_vxlan_port(struct net_device *netdev,
8102 				sa_family_t sa_family, __be16 port)
8103 {
8104 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8105 	struct i40e_vsi *vsi = np->vsi;
8106 	struct i40e_pf *pf = vsi->back;
8107 	u8 idx;
8108 
8109 	if (sa_family == AF_INET6)
8110 		return;
8111 
8112 	idx = i40e_get_vxlan_port_idx(pf, port);
8113 
8114 	/* Check if port already exists */
8115 	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8116 		/* if port exists, set it to 0 (mark for deletion)
8117 		 * and make it pending
8118 		 */
8119 		pf->vxlan_ports[idx] = 0;
8120 		pf->pending_vxlan_bitmap |= BIT_ULL(idx);
8121 		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8122 
8123 		dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
8124 			 ntohs(port));
8125 	} else {
8126 		netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8127 			    ntohs(port));
8128 	}
8129 }
8130 
8131 #endif
8132 static int i40e_get_phys_port_id(struct net_device *netdev,
8133 				 struct netdev_phys_item_id *ppid)
8134 {
8135 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8136 	struct i40e_pf *pf = np->vsi->back;
8137 	struct i40e_hw *hw = &pf->hw;
8138 
8139 	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8140 		return -EOPNOTSUPP;
8141 
8142 	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8143 	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8144 
8145 	return 0;
8146 }
8147 
8148 /**
8149  * i40e_ndo_fdb_add - add an entry to the hardware database
8150  * @ndm: the input from the stack
8151  * @tb: pointer to array of nladdr (unused)
8152  * @dev: the net device pointer
8153  * @addr: the MAC address entry being added
 * @vid: VLAN ID of the entry (only 0 is currently supported)
8154  * @flags: instructions from stack about fdb operation
8155  */
8156 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8157 			    struct net_device *dev,
8158 			    const unsigned char *addr, u16 vid,
8159 			    u16 flags)
8160 {
8161 	struct i40e_netdev_priv *np = netdev_priv(dev);
8162 	struct i40e_pf *pf = np->vsi->back;
8163 	int err = 0;
8164 
8165 	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8166 		return -EOPNOTSUPP;
8167 
8168 	if (vid) {
8169 		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8170 		return -EINVAL;
8171 	}
8172 
8173 	/* Hardware does not support aging addresses, so if an
8174 	 * ndm_state is given only allow permanent addresses
8175 	 */
8176 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8177 		netdev_info(dev, "FDB only supports static addresses\n");
8178 		return -EINVAL;
8179 	}
8180 
8181 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8182 		err = dev_uc_add_excl(dev, addr);
8183 	else if (is_multicast_ether_addr(addr))
8184 		err = dev_mc_add_excl(dev, addr);
8185 	else
8186 		err = -EINVAL;
8187 
8188 	/* Only return duplicate errors if NLM_F_EXCL is set */
8189 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
8190 		err = 0;
8191 
8192 	return err;
8193 }
8194 
8195 /**
8196  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8197  * @dev: the netdev being configured
8198  * @nlh: RTNL message
 * @flags: netlink flags passed in (currently unused)
8199  *
8200  * Inserts a new hardware bridge if not already created and
8201  * enables the bridging mode requested (VEB or VEPA). If the
8202  * hardware bridge has already been inserted and the request
8203  * is to change the mode, a PF reset is triggered to rebuild
8204  * the switch components with the requested hardware bridge
8205  * mode enabled.
8206  **/
8207 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8208 				   struct nlmsghdr *nlh,
8209 				   u16 flags)
8210 {
8211 	struct i40e_netdev_priv *np = netdev_priv(dev);
8212 	struct i40e_vsi *vsi = np->vsi;
8213 	struct i40e_pf *pf = vsi->back;
8214 	struct i40e_veb *veb = NULL;
8215 	struct nlattr *attr, *br_spec;
8216 	int i, rem;
8217 
8218 	/* Only for PF VSI for now */
8219 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8220 		return -EOPNOTSUPP;
8221 
8222 	/* Find the HW bridge for PF VSI */
8223 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8224 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8225 			veb = pf->veb[i];
8226 	}
8227 
8228 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8229 
8230 	nla_for_each_nested(attr, br_spec, rem) {
8231 		__u16 mode;
8232 
8233 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
8234 			continue;
8235 
8236 		mode = nla_get_u16(attr);
8237 		if ((mode != BRIDGE_MODE_VEPA) &&
8238 		    (mode != BRIDGE_MODE_VEB))
8239 			return -EINVAL;
8240 
8241 		/* Insert a new HW bridge */
8242 		if (!veb) {
8243 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8244 					     vsi->tc_config.enabled_tc);
8245 			if (veb) {
8246 				veb->bridge_mode = mode;
8247 				i40e_config_bridge_mode(veb);
8248 			} else {
8249 				/* No Bridge HW offload available */
8250 				return -ENOENT;
8251 			}
8252 			break;
8253 		} else if (mode != veb->bridge_mode) {
8254 			/* Existing HW bridge but different mode needs reset */
8255 			veb->bridge_mode = mode;
8256 			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8257 			if (mode == BRIDGE_MODE_VEB)
8258 				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8259 			else
8260 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8261 			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8262 			break;
8263 		}
8264 	}
8265 
8266 	return 0;
8267 }
8268 
8269 /**
8270  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8271  * @skb: skb buff
8272  * @pid: process id
8273  * @seq: RTNL message seq #
8274  * @dev: the netdev being configured
8275  * @filter_mask: unused
 * @nlflags: netlink flags passed to ndo_dflt_bridge_getlink()
8276  *
8277  * Return the mode in which the hardware bridge is operating,
8278  * i.e. VEB or VEPA.
8279  **/
8280 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8281 				   struct net_device *dev,
8282 				   u32 filter_mask, int nlflags)
8283 {
8284 	struct i40e_netdev_priv *np = netdev_priv(dev);
8285 	struct i40e_vsi *vsi = np->vsi;
8286 	struct i40e_pf *pf = vsi->back;
8287 	struct i40e_veb *veb = NULL;
8288 	int i;
8289 
8290 	/* Only for PF VSI for now */
8291 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8292 		return -EOPNOTSUPP;
8293 
8294 	/* Find the HW bridge for the PF VSI */
8295 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8296 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8297 			veb = pf->veb[i];
8298 	}
8299 
8300 	if (!veb)
8301 		return 0;
8302 
8303 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8304 				       nlflags, 0, 0, filter_mask, NULL);
8305 }
8306 
8307 #define I40E_MAX_TUNNEL_HDR_LEN 80
8308 /**
8309  * i40e_features_check - Validate encapsulated packet conforms to limits
8310  * @skb: skb buff
8311  * @dev: This physical port's netdev
8312  * @features: Offload features that the stack believes apply
8313  **/
8314 static netdev_features_t i40e_features_check(struct sk_buff *skb,
8315 					     struct net_device *dev,
8316 					     netdev_features_t features)
8317 {
8318 	if (skb->encapsulation &&
8319 	    (skb_inner_mac_header(skb) - skb_transport_header(skb) >
8320 	     I40E_MAX_TUNNEL_HDR_LEN))
8321 		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
8322 
8323 	return features;
8324 }
8325 
8326 static const struct net_device_ops i40e_netdev_ops = {
8327 	.ndo_open		= i40e_open,
8328 	.ndo_stop		= i40e_close,
8329 	.ndo_start_xmit		= i40e_lan_xmit_frame,
8330 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
8331 	.ndo_set_rx_mode	= i40e_set_rx_mode,
8332 	.ndo_validate_addr	= eth_validate_addr,
8333 	.ndo_set_mac_address	= i40e_set_mac,
8334 	.ndo_change_mtu		= i40e_change_mtu,
8335 	.ndo_do_ioctl		= i40e_ioctl,
8336 	.ndo_tx_timeout		= i40e_tx_timeout,
8337 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
8338 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
8339 #ifdef CONFIG_NET_POLL_CONTROLLER
8340 	.ndo_poll_controller	= i40e_netpoll,
8341 #endif
8342 	.ndo_setup_tc		= i40e_setup_tc,
8343 #ifdef I40E_FCOE
8344 	.ndo_fcoe_enable	= i40e_fcoe_enable,
8345 	.ndo_fcoe_disable	= i40e_fcoe_disable,
8346 #endif
8347 	.ndo_set_features	= i40e_set_features,
8348 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
8349 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
8350 	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
8351 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
8352 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
8353 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
8354 #ifdef CONFIG_I40E_VXLAN
8355 	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
8356 	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
8357 #endif
8358 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
8359 	.ndo_fdb_add		= i40e_ndo_fdb_add,
8360 	.ndo_features_check	= i40e_features_check,
8361 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
8362 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
8363 };
8364 
8365 /**
8366  * i40e_config_netdev - Setup the netdev flags
8367  * @vsi: the VSI being configured
8368  *
8369  * Returns 0 on success, negative value on failure
8370  **/
8371 static int i40e_config_netdev(struct i40e_vsi *vsi)
8372 {
8373 	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
8374 	struct i40e_pf *pf = vsi->back;
8375 	struct i40e_hw *hw = &pf->hw;
8376 	struct i40e_netdev_priv *np;
8377 	struct net_device *netdev;
8378 	u8 mac_addr[ETH_ALEN];
8379 	int etherdev_size;
8380 
8381 	etherdev_size = sizeof(struct i40e_netdev_priv);
8382 	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
8383 	if (!netdev)
8384 		return -ENOMEM;
8385 
8386 	vsi->netdev = netdev;
8387 	np = netdev_priv(netdev);
8388 	np->vsi = vsi;
8389 
8390 	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |
8391 				  NETIF_F_GSO_UDP_TUNNEL |
8392 				  NETIF_F_TSO;
8393 
8394 	netdev->features = NETIF_F_SG		       |
8395 			   NETIF_F_IP_CSUM	       |
8396 			   NETIF_F_SCTP_CSUM	       |
8397 			   NETIF_F_HIGHDMA	       |
8398 			   NETIF_F_GSO_UDP_TUNNEL      |
8399 			   NETIF_F_HW_VLAN_CTAG_TX     |
8400 			   NETIF_F_HW_VLAN_CTAG_RX     |
8401 			   NETIF_F_HW_VLAN_CTAG_FILTER |
8402 			   NETIF_F_IPV6_CSUM	       |
8403 			   NETIF_F_TSO		       |
8404 			   NETIF_F_TSO_ECN	       |
8405 			   NETIF_F_TSO6		       |
8406 			   NETIF_F_RXCSUM	       |
8407 			   NETIF_F_RXHASH	       |
8408 			   0;
8409 
8410 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
8411 		netdev->features |= NETIF_F_NTUPLE;
8412 
8413 	/* copy netdev features into list of user selectable features */
8414 	netdev->hw_features |= netdev->features;
8415 
8416 	if (vsi->type == I40E_VSI_MAIN) {
8417 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
8418 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
8419 		/* The following steps are necessary to prevent reception
8420 		 * of tagged packets - some older NVM configurations load a
8421 		 * default MAC-VLAN filter that accepts any tagged packet,
8422 		 * which must be replaced by a normal filter.
8423 		 */
8424 		if (!i40e_rm_default_mac_filter(vsi, mac_addr))
8425 			i40e_add_filter(vsi, mac_addr,
8426 					I40E_VLAN_ANY, false, true);
8427 	} else {
8428 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
8429 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
8430 			 pf->vsi[pf->lan_vsi]->netdev->name);
8431 		random_ether_addr(mac_addr);
8432 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
8433 	}
8434 	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
8435 
8436 	ether_addr_copy(netdev->dev_addr, mac_addr);
8437 	ether_addr_copy(netdev->perm_addr, mac_addr);
8438 	/* vlan gets same features (except vlan offload)
8439 	 * after any tweaks for specific VSI types
8440 	 */
8441 	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
8442 						     NETIF_F_HW_VLAN_CTAG_RX |
8443 						   NETIF_F_HW_VLAN_CTAG_FILTER);
8444 	netdev->priv_flags |= IFF_UNICAST_FLT;
8445 	netdev->priv_flags |= IFF_SUPP_NOFCS;
8446 	/* Setup netdev TC information */
8447 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
8448 
8449 	netdev->netdev_ops = &i40e_netdev_ops;
8450 	netdev->watchdog_timeo = 5 * HZ;
8451 	i40e_set_ethtool_ops(netdev);
8452 #ifdef I40E_FCOE
8453 	i40e_fcoe_config_netdev(netdev, vsi);
8454 #endif
8455 
8456 	return 0;
8457 }
8458 
8459 /**
8460  * i40e_vsi_delete - Delete a VSI from the switch
8461  * @vsi: the VSI being removed
8464  **/
8465 static void i40e_vsi_delete(struct i40e_vsi *vsi)
8466 {
8467 	/* removing the default VSI is not allowed */
8468 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
8469 		return;
8470 
8471 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
8472 }
8473 
8474 /**
8475  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
8476  * @vsi: the VSI being queried
8477  *
8478  * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
8479  **/
8480 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
8481 {
8482 	struct i40e_veb *veb;
8483 	struct i40e_pf *pf = vsi->back;
8484 
8485 	/* Uplink is not a bridge so default to VEB */
8486 	if (vsi->veb_idx == I40E_NO_VEB)
8487 		return 1;
8488 
8489 	veb = pf->veb[vsi->veb_idx];
8490 	/* Uplink is a bridge in VEPA mode */
8491 	if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
8492 		return 0;
8493 
8494 	/* Uplink is a bridge in VEB mode */
8495 	return 1;
8496 }
8497 
8498 /**
8499  * i40e_add_vsi - Add a VSI to the switch
8500  * @vsi: the VSI being configured
8501  *
8502  * This initializes a VSI context depending on the VSI type to be added and
8503  * passes it down to the add_vsi aq command.
8504  **/
8505 static int i40e_add_vsi(struct i40e_vsi *vsi)
8506 {
8507 	int ret = -ENODEV;
8508 	struct i40e_mac_filter *f, *ftmp;
8509 	struct i40e_pf *pf = vsi->back;
8510 	struct i40e_hw *hw = &pf->hw;
8511 	struct i40e_vsi_context ctxt;
8512 	u8 enabled_tc = 0x1; /* TC0 enabled */
8513 	int f_count = 0;
8514 
8515 	memset(&ctxt, 0, sizeof(ctxt));
8516 	switch (vsi->type) {
8517 	case I40E_VSI_MAIN:
8518 		/* The PF's main VSI is already setup as part of the
8519 		 * device initialization, so we'll not bother with
8520 		 * the add_vsi call, but we will retrieve the current
8521 		 * VSI context.
8522 		 */
8523 		ctxt.seid = pf->main_vsi_seid;
8524 		ctxt.pf_num = pf->hw.pf_id;
8525 		ctxt.vf_num = 0;
8526 		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8527 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8528 		if (ret) {
8529 			dev_info(&pf->pdev->dev,
8530 				 "couldn't get PF vsi config, err %s aq_err %s\n",
8531 				 i40e_stat_str(&pf->hw, ret),
8532 				 i40e_aq_str(&pf->hw,
8533 					     pf->hw.aq.asq_last_status));
8534 			return -ENOENT;
8535 		}
8536 		vsi->info = ctxt.info;
8537 		vsi->info.valid_sections = 0;
8538 
8539 		vsi->seid = ctxt.seid;
8540 		vsi->id = ctxt.vsi_number;
8541 
8542 		enabled_tc = i40e_pf_get_tc_map(pf);
8543 
8544 		/* MFP mode setup queue map and update VSI */
8545 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
8546 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
8547 			memset(&ctxt, 0, sizeof(ctxt));
8548 			ctxt.seid = pf->main_vsi_seid;
8549 			ctxt.pf_num = pf->hw.pf_id;
8550 			ctxt.vf_num = 0;
8551 			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
8552 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
8553 			if (ret) {
8554 				dev_info(&pf->pdev->dev,
8555 					 "update vsi failed, err %s aq_err %s\n",
8556 					 i40e_stat_str(&pf->hw, ret),
8557 					 i40e_aq_str(&pf->hw,
8558 						    pf->hw.aq.asq_last_status));
8559 				ret = -ENOENT;
8560 				goto err;
8561 			}
8562 			/* update the local VSI info queue map */
8563 			i40e_vsi_update_queue_map(vsi, &ctxt);
8564 			vsi->info.valid_sections = 0;
8565 		} else {
8566 			/* Default/Main VSI is only enabled for TC0;
8567 			 * reconfigure it to enable all TCs that are
8568 			 * available on the port in SFP mode.
8569 			 * In the MFP case the iSCSI PF would use this
8570 			 * flow to enable the LAN+iSCSI TC.
8571 			 */
8572 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
8573 			if (ret) {
8574 				dev_info(&pf->pdev->dev,
8575 					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
8576 					 enabled_tc,
8577 					 i40e_stat_str(&pf->hw, ret),
8578 					 i40e_aq_str(&pf->hw,
8579 						    pf->hw.aq.asq_last_status));
8580 				ret = -ENOENT;
8581 			}
8582 		}
8583 		break;
8584 
8585 	case I40E_VSI_FDIR:
8586 		ctxt.pf_num = hw->pf_id;
8587 		ctxt.vf_num = 0;
8588 		ctxt.uplink_seid = vsi->uplink_seid;
8589 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8590 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8591 		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
8592 		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
8593 			ctxt.info.valid_sections |=
8594 			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8595 			ctxt.info.switch_id =
8596 			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8597 		}
8598 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8599 		break;
8600 
8601 	case I40E_VSI_VMDQ2:
8602 		ctxt.pf_num = hw->pf_id;
8603 		ctxt.vf_num = 0;
8604 		ctxt.uplink_seid = vsi->uplink_seid;
8605 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8606 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
8607 
8608 		/* This VSI is connected to VEB so the switch_id
8609 		 * should be set to zero by default.
8610 		 */
8611 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8612 			ctxt.info.valid_sections |=
8613 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8614 			ctxt.info.switch_id =
8615 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8616 		}
8617 
8618 		/* Setup the VSI tx/rx queue map for TC0 only for now */
8619 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8620 		break;
8621 
8622 	case I40E_VSI_SRIOV:
8623 		ctxt.pf_num = hw->pf_id;
8624 		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
8625 		ctxt.uplink_seid = vsi->uplink_seid;
8626 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8627 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
8628 
8629 		/* This VSI is connected to VEB so the switch_id
8630 		 * should be set to zero by default.
8631 		 */
8632 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8633 			ctxt.info.valid_sections |=
8634 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8635 			ctxt.info.switch_id =
8636 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8637 		}
8638 
8639 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
8640 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
8641 		if (pf->vf[vsi->vf_id].spoofchk) {
8642 			ctxt.info.valid_sections |=
8643 				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
8644 			ctxt.info.sec_flags |=
8645 				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
8646 				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
8647 		}
8648 		/* Setup the VSI tx/rx queue map for TC0 only for now */
8649 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8650 		break;
8651 
8652 #ifdef I40E_FCOE
8653 	case I40E_VSI_FCOE:
8654 		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
8655 		if (ret) {
8656 			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
8657 			return ret;
8658 		}
8659 		break;
8660 
8661 #endif /* I40E_FCOE */
8662 	default:
8663 		return -ENODEV;
8664 	}
8665 
8666 	if (vsi->type != I40E_VSI_MAIN) {
8667 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
8668 		if (ret) {
8669 			dev_info(&vsi->back->pdev->dev,
8670 				 "add vsi failed, err %s aq_err %s\n",
8671 				 i40e_stat_str(&pf->hw, ret),
8672 				 i40e_aq_str(&pf->hw,
8673 					     pf->hw.aq.asq_last_status));
8674 			ret = -ENOENT;
8675 			goto err;
8676 		}
8677 		vsi->info = ctxt.info;
8678 		vsi->info.valid_sections = 0;
8679 		vsi->seid = ctxt.seid;
8680 		vsi->id = ctxt.vsi_number;
8681 	}
8682 
8683 	/* If macvlan filters already exist, force them to get loaded */
8684 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
8685 		f->changed = true;
8686 		f_count++;
8687 
8688 		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
8689 			struct i40e_aqc_remove_macvlan_element_data element;
8690 
8691 			memset(&element, 0, sizeof(element));
8692 			ether_addr_copy(element.mac_addr, f->macaddr);
8693 			element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
8694 			ret = i40e_aq_remove_macvlan(hw, vsi->seid,
8695 						     &element, 1, NULL);
8696 			if (ret) {
8697 				/* some older FW has a different default */
8698 				element.flags |=
8699 					       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
8700 				i40e_aq_remove_macvlan(hw, vsi->seid,
8701 						       &element, 1, NULL);
8702 			}
8703 
8704 			i40e_aq_mac_address_write(hw,
8705 						  I40E_AQC_WRITE_TYPE_LAA_WOL,
8706 						  f->macaddr, NULL);
8707 		}
8708 	}
8709 	if (f_count) {
8710 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
8711 		pf->flags |= I40E_FLAG_FILTER_SYNC;
8712 	}
8713 
8714 	/* Update VSI BW information */
8715 	ret = i40e_vsi_get_bw_info(vsi);
8716 	if (ret) {
8717 		dev_info(&pf->pdev->dev,
8718 			 "couldn't get vsi bw info, err %s aq_err %s\n",
8719 			 i40e_stat_str(&pf->hw, ret),
8720 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8721 		/* VSI is already added so not tearing that up */
8722 		ret = 0;
8723 	}
8724 
8725 err:
8726 	return ret;
8727 }
8728 
8729 /**
8730  * i40e_vsi_release - Delete a VSI and free its resources
8731  * @vsi: the VSI being removed
8732  *
8733  * Returns 0 on success or < 0 on error
8734  **/
8735 int i40e_vsi_release(struct i40e_vsi *vsi)
8736 {
8737 	struct i40e_mac_filter *f, *ftmp;
8738 	struct i40e_veb *veb = NULL;
8739 	struct i40e_pf *pf;
8740 	u16 uplink_seid;
8741 	int i, n;
8742 
8743 	pf = vsi->back;
8744 
8745 	/* release of a VEB-owner or last VSI is not allowed */
8746 	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
8747 		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
8748 			 vsi->seid, vsi->uplink_seid);
8749 		return -ENODEV;
8750 	}
8751 	if (vsi == pf->vsi[pf->lan_vsi] &&
8752 	    !test_bit(__I40E_DOWN, &pf->state)) {
8753 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
8754 		return -ENODEV;
8755 	}
8756 
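	/* Tear down in roughly the reverse order of setup: unregister or
	 * close the netdev, quiesce interrupts, drop the MAC/VLAN filters,
	 * and only then delete the VSI from the HW switch below.
	 */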
8757 	uplink_seid = vsi->uplink_seid;
8758 	if (vsi->type != I40E_VSI_SRIOV) {
8759 		if (vsi->netdev_registered) {
8760 			vsi->netdev_registered = false;
8761 			if (vsi->netdev) {
8762 				/* results in a call to i40e_close() */
8763 				unregister_netdev(vsi->netdev);
8764 			}
8765 		} else {
8766 			i40e_vsi_close(vsi);
8767 		}
8768 		i40e_vsi_disable_irq(vsi);
8769 	}
8770 
8771 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
8772 		i40e_del_filter(vsi, f->macaddr, f->vlan,
8773 				f->is_vf, f->is_netdev);
8774 	i40e_sync_vsi_filters(vsi);
8775 
8776 	i40e_vsi_delete(vsi);
8777 	i40e_vsi_free_q_vectors(vsi);
8778 	if (vsi->netdev) {
8779 		free_netdev(vsi->netdev);
8780 		vsi->netdev = NULL;
8781 	}
8782 	i40e_vsi_clear_rings(vsi);
8783 	i40e_vsi_clear(vsi);
8784 
8785 	/* If this was the last thing on the VEB, except for the
8786 	 * controlling VSI, remove the VEB, which puts the controlling
8787 	 * VSI onto the next level down in the switch.
8788 	 *
8789 	 * Well, okay, there's one more exception here: don't remove
8790 	 * the orphan VEBs yet.  We'll wait for an explicit remove request
8791 	 * from up the network stack.
8792 	 */
8793 	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
8794 		if (pf->vsi[i] &&
8795 		    pf->vsi[i]->uplink_seid == uplink_seid &&
8796 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8797 			n++;      /* count the VSIs */
8798 		}
8799 	}
8800 	for (i = 0; i < I40E_MAX_VEB; i++) {
8801 		if (!pf->veb[i])
8802 			continue;
8803 		if (pf->veb[i]->uplink_seid == uplink_seid)
8804 			n++;     /* count the VEBs */
8805 		if (pf->veb[i]->seid == uplink_seid)
8806 			veb = pf->veb[i];
8807 	}
8808 	if (n == 0 && veb && veb->uplink_seid != 0)
8809 		i40e_veb_release(veb);
8810 
8811 	return 0;
8812 }
8813 
8814 /**
8815  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8816  * @vsi: ptr to the VSI
8817  *
8818  * This should only be called after i40e_vsi_mem_alloc() which allocates the
8819  * corresponding SW VSI structure and initializes num_queue_pairs for the
8820  * newly allocated VSI.
8821  *
8822  * Returns 0 on success or negative on failure
8823  **/
8824 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8825 {
8826 	int ret = -ENOENT;
8827 	struct i40e_pf *pf = vsi->back;
8828 
8829 	if (vsi->q_vectors[0]) {
8830 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8831 			 vsi->seid);
8832 		return -EEXIST;
8833 	}
8834 
8835 	if (vsi->base_vector) {
8836 		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
8837 			 vsi->seid, vsi->base_vector);
8838 		return -EEXIST;
8839 	}
8840 
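	/* allocate the software q_vector structures for this VSI; the
	 * actual IRQ vector indices are reserved from the PF's pile below
	 */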
8841 	ret = i40e_vsi_alloc_q_vectors(vsi);
8842 	if (ret) {
8843 		dev_info(&pf->pdev->dev,
8844 			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8845 			 vsi->num_q_vectors, vsi->seid, ret);
8846 		vsi->num_q_vectors = 0;
8847 		goto vector_setup_out;
8848 	}
8849 
8850 	/* In Legacy mode, we do not have to get any other vector since we
8851 	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
8853 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8854 		return ret;
	if (vsi->num_q_vectors) {
		int base_vector;

		/* reserve a contiguous block of IRQ vectors from the PF's
		 * pile; check the signed return value for an error before
		 * storing it in base_vector
		 */
		base_vector = i40e_get_lump(pf, pf->irq_pile,
					    vsi->num_q_vectors, vsi->idx);
		if (base_vector < 0) {
			dev_info(&pf->pdev->dev,
				 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
				 vsi->num_q_vectors, vsi->seid, base_vector);
			i40e_vsi_free_q_vectors(vsi);
			ret = -ENOENT;
			goto vector_setup_out;
		}
		vsi->base_vector = base_vector;
	}
8866 
8867 vector_setup_out:
8868 	return ret;
8869 }
8870 
8871 /**
8872  * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
8873  * @vsi: pointer to the vsi.
8874  *
8875  * This re-allocates a vsi's queue resources.
8876  *
8877  * Returns pointer to the successfully allocated and configured VSI sw struct
8878  * on success, otherwise returns NULL on failure.
8879  **/
8880 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
8881 {
8882 	struct i40e_pf *pf = vsi->back;
8883 	u8 enabled_tc;
8884 	int ret;
8885 
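	/* return this VSI's queue block to the PF-wide pile and drop its
	 * rings and arrays so they can be re-sized and re-allocated below
	 */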
8886 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
8887 	i40e_vsi_clear_rings(vsi);
8888 
8889 	i40e_vsi_free_arrays(vsi, false);
8890 	i40e_set_num_rings_in_vsi(vsi);
8891 	ret = i40e_vsi_alloc_arrays(vsi, false);
8892 	if (ret)
8893 		goto err_vsi;
8894 
8895 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
8896 	if (ret < 0) {
8897 		dev_info(&pf->pdev->dev,
8898 			 "failed to get tracking for %d queues for VSI %d err %d\n",
8899 			 vsi->alloc_queue_pairs, vsi->seid, ret);
8900 		goto err_vsi;
8901 	}
8902 	vsi->base_queue = ret;
8903 
8904 	/* Update the FW view of the VSI. Force a reset of TC and queue
8905 	 * layout configurations.
8906 	 */
8907 	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8908 	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8909 	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8910 	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8911 
8912 	/* assign it some queues */
8913 	ret = i40e_alloc_rings(vsi);
8914 	if (ret)
8915 		goto err_rings;
8916 
8917 	/* map all of the rings to the q_vectors */
8918 	i40e_vsi_map_rings_to_vectors(vsi);
8919 	return vsi;
8920 
8921 err_rings:
8922 	i40e_vsi_free_q_vectors(vsi);
8923 	if (vsi->netdev_registered) {
8924 		vsi->netdev_registered = false;
8925 		unregister_netdev(vsi->netdev);
8926 		free_netdev(vsi->netdev);
8927 		vsi->netdev = NULL;
8928 	}
8929 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8930 err_vsi:
8931 	i40e_vsi_clear(vsi);
8932 	return NULL;
8933 }
8934 
8935 /**
8936  * i40e_vsi_setup - Set up a VSI by a given type
8937  * @pf: board private structure
8938  * @type: VSI type
8939  * @uplink_seid: the switch element to link to
8940  * @param1: usage depends upon VSI type. For VF types, indicates VF id
8941  *
 * This allocates the sw VSI structure and its queue resources, then adds a VSI
8943  * to the identified VEB.
8944  *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
8946  * success, otherwise returns NULL on failure.
8947  **/
8948 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
8949 				u16 uplink_seid, u32 param1)
8950 {
8951 	struct i40e_vsi *vsi = NULL;
8952 	struct i40e_veb *veb = NULL;
8953 	int ret, i;
8954 	int v_idx;
8955 
8956 	/* The requested uplink_seid must be either
8957 	 *     - the PF's port seid
8958 	 *              no VEB is needed because this is the PF
8959 	 *              or this is a Flow Director special case VSI
8960 	 *     - seid of an existing VEB
8961 	 *     - seid of a VSI that owns an existing VEB
8962 	 *     - seid of a VSI that doesn't own a VEB
8963 	 *              a new VEB is created and the VSI becomes the owner
8964 	 *     - seid of the PF VSI, which is what creates the first VEB
8965 	 *              this is a special case of the previous
8966 	 *
8967 	 * Find which uplink_seid we were given and create a new VEB if needed
8968 	 */
8969 	for (i = 0; i < I40E_MAX_VEB; i++) {
8970 		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
8971 			veb = pf->veb[i];
8972 			break;
8973 		}
8974 	}
8975 
8976 	if (!veb && uplink_seid != pf->mac_seid) {
8977 
8978 		for (i = 0; i < pf->num_alloc_vsi; i++) {
8979 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
8980 				vsi = pf->vsi[i];
8981 				break;
8982 			}
8983 		}
8984 		if (!vsi) {
8985 			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
8986 				 uplink_seid);
8987 			return NULL;
8988 		}
8989 
8990 		if (vsi->uplink_seid == pf->mac_seid)
8991 			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
8992 					     vsi->tc_config.enabled_tc);
8993 		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
8994 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8995 					     vsi->tc_config.enabled_tc);
8996 		if (veb) {
8997 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
8998 				dev_info(&vsi->back->pdev->dev,
8999 					 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
9000 					 __func__);
9001 				return NULL;
9002 			}
			/* We come up in VEPA mode by default unless SR-IOV
			 * has already been enabled, in which case we can't
			 * force VEPA mode.
			 */
9007 			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9008 				veb->bridge_mode = BRIDGE_MODE_VEPA;
9009 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9010 			}
9011 			i40e_config_bridge_mode(veb);
9012 		}
9013 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9014 			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9015 				veb = pf->veb[i];
9016 		}
9017 		if (!veb) {
9018 			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9019 			return NULL;
9020 		}
9021 
9022 		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9023 		uplink_seid = veb->seid;
9024 	}
9025 
9026 	/* get vsi sw struct */
9027 	v_idx = i40e_vsi_mem_alloc(pf, type);
9028 	if (v_idx < 0)
9029 		goto err_alloc;
9030 	vsi = pf->vsi[v_idx];
9031 	if (!vsi)
9032 		goto err_alloc;
9033 	vsi->type = type;
9034 	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9035 
9036 	if (type == I40E_VSI_MAIN)
9037 		pf->lan_vsi = v_idx;
9038 	else if (type == I40E_VSI_SRIOV)
9039 		vsi->vf_id = param1;
9040 	/* assign it some queues */
9041 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9042 				vsi->idx);
9043 	if (ret < 0) {
9044 		dev_info(&pf->pdev->dev,
9045 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
9046 			 vsi->alloc_queue_pairs, vsi->seid, ret);
9047 		goto err_vsi;
9048 	}
9049 	vsi->base_queue = ret;
9050 
9051 	/* get a VSI from the hardware */
9052 	vsi->uplink_seid = uplink_seid;
9053 	ret = i40e_add_vsi(vsi);
9054 	if (ret)
9055 		goto err_vsi;
9056 
9057 	switch (vsi->type) {
9058 	/* setup the netdev if needed */
9059 	case I40E_VSI_MAIN:
9060 	case I40E_VSI_VMDQ2:
9061 	case I40E_VSI_FCOE:
9062 		ret = i40e_config_netdev(vsi);
9063 		if (ret)
9064 			goto err_netdev;
9065 		ret = register_netdev(vsi->netdev);
9066 		if (ret)
9067 			goto err_netdev;
9068 		vsi->netdev_registered = true;
9069 		netif_carrier_off(vsi->netdev);
9070 #ifdef CONFIG_I40E_DCB
9071 		/* Setup DCB netlink interface */
9072 		i40e_dcbnl_setup(vsi);
9073 #endif /* CONFIG_I40E_DCB */
9074 		/* fall through */
9075 
9076 	case I40E_VSI_FDIR:
9077 		/* set up vectors and rings if needed */
9078 		ret = i40e_vsi_setup_vectors(vsi);
9079 		if (ret)
9080 			goto err_msix;
9081 
9082 		ret = i40e_alloc_rings(vsi);
9083 		if (ret)
9084 			goto err_rings;
9085 
9086 		/* map all of the rings to the q_vectors */
9087 		i40e_vsi_map_rings_to_vectors(vsi);
9088 
9089 		i40e_vsi_reset_stats(vsi);
9090 		break;
9091 
9092 	default:
9093 		/* no netdev or rings for the other VSI types */
9094 		break;
9095 	}
9096 
9097 	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9098 	    (vsi->type == I40E_VSI_VMDQ2)) {
9099 		ret = i40e_vsi_config_rss(vsi);
9100 	}
9101 	return vsi;
9102 
9103 err_rings:
9104 	i40e_vsi_free_q_vectors(vsi);
9105 err_msix:
9106 	if (vsi->netdev_registered) {
9107 		vsi->netdev_registered = false;
9108 		unregister_netdev(vsi->netdev);
9109 		free_netdev(vsi->netdev);
9110 		vsi->netdev = NULL;
9111 	}
9112 err_netdev:
9113 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9114 err_vsi:
9115 	i40e_vsi_clear(vsi);
9116 err_alloc:
9117 	return NULL;
9118 }
9119 
9120 /**
9121  * i40e_veb_get_bw_info - Query VEB BW information
9122  * @veb: the veb to query
9123  *
9124  * Query the Tx scheduler BW configuration data for given VEB
9125  **/
9126 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9127 {
9128 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9129 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9130 	struct i40e_pf *pf = veb->pf;
9131 	struct i40e_hw *hw = &pf->hw;
9132 	u32 tc_bw_max;
9133 	int ret = 0;
9134 	int i;
9135 
9136 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9137 						  &bw_data, NULL);
9138 	if (ret) {
9139 		dev_info(&pf->pdev->dev,
9140 			 "query veb bw config failed, err %s aq_err %s\n",
9141 			 i40e_stat_str(&pf->hw, ret),
9142 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9143 		goto out;
9144 	}
9145 
9146 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9147 						   &ets_data, NULL);
9148 	if (ret) {
9149 		dev_info(&pf->pdev->dev,
9150 			 "query veb bw ets config failed, err %s aq_err %s\n",
9151 			 i40e_stat_str(&pf->hw, ret),
9152 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9153 		goto out;
9154 	}
9155 
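	/* unpack the ETS and BW query results into the sw VEB struct; the
	 * per-TC max quanta are packed as 4-bit fields across the two
	 * 16-bit tc_bw_max words
	 */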
9156 	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9157 	veb->bw_max_quanta = ets_data.tc_bw_max;
9158 	veb->is_abs_credits = bw_data.absolute_credits_enable;
9159 	veb->enabled_tc = ets_data.tc_valid_bits;
9160 	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9161 		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
9162 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9163 		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9164 		veb->bw_tc_limit_credits[i] =
9165 					le16_to_cpu(bw_data.tc_bw_limits[i]);
9166 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9167 	}
9168 
9169 out:
9170 	return ret;
9171 }
9172 
9173 /**
9174  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9175  * @pf: board private structure
9176  *
9177  * On error: returns error code (negative)
 * On success: returns veb index in PF (non-negative)
9179  **/
9180 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9181 {
9182 	int ret = -ENOENT;
9183 	struct i40e_veb *veb;
9184 	int i;
9185 
9186 	/* Need to protect the allocation of switch elements at the PF level */
9187 	mutex_lock(&pf->switch_mutex);
9188 
9189 	/* VEB list may be fragmented if VEB creation/destruction has
9190 	 * been happening.  We can afford to do a quick scan to look
9191 	 * for any free slots in the list.
9192 	 *
	 * find the next empty veb slot with a simple linear scan
9194 	 */
9195 	i = 0;
9196 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9197 		i++;
9198 	if (i >= I40E_MAX_VEB) {
9199 		ret = -ENOMEM;
9200 		goto err_alloc_veb;  /* out of VEB slots! */
9201 	}
9202 
9203 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9204 	if (!veb) {
9205 		ret = -ENOMEM;
9206 		goto err_alloc_veb;
9207 	}
9208 	veb->pf = pf;
9209 	veb->idx = i;
9210 	veb->enabled_tc = 1;
9211 
9212 	pf->veb[i] = veb;
9213 	ret = i;
9214 err_alloc_veb:
9215 	mutex_unlock(&pf->switch_mutex);
9216 	return ret;
9217 }
9218 
9219 /**
9220  * i40e_switch_branch_release - Delete a branch of the switch tree
9221  * @branch: where to start deleting
9222  *
9223  * This uses recursion to find the tips of the branch to be
9224  * removed, deleting until we get back to and can delete this VEB.
9225  **/
9226 static void i40e_switch_branch_release(struct i40e_veb *branch)
9227 {
9228 	struct i40e_pf *pf = branch->pf;
9229 	u16 branch_seid = branch->seid;
9230 	u16 veb_idx = branch->idx;
9231 	int i;
9232 
9233 	/* release any VEBs on this VEB - RECURSION */
9234 	for (i = 0; i < I40E_MAX_VEB; i++) {
9235 		if (!pf->veb[i])
9236 			continue;
9237 		if (pf->veb[i]->uplink_seid == branch->seid)
9238 			i40e_switch_branch_release(pf->veb[i]);
9239 	}
9240 
9241 	/* Release the VSIs on this VEB, but not the owner VSI.
9242 	 *
9243 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
9244 	 *       the VEB itself, so don't use (*branch) after this loop.
9245 	 */
9246 	for (i = 0; i < pf->num_alloc_vsi; i++) {
9247 		if (!pf->vsi[i])
9248 			continue;
9249 		if (pf->vsi[i]->uplink_seid == branch_seid &&
9250 		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9251 			i40e_vsi_release(pf->vsi[i]);
9252 		}
9253 	}
9254 
9255 	/* There's one corner case where the VEB might not have been
9256 	 * removed, so double check it here and remove it if needed.
9257 	 * This case happens if the veb was created from the debugfs
9258 	 * commands and no VSIs were added to it.
9259 	 */
9260 	if (pf->veb[veb_idx])
9261 		i40e_veb_release(pf->veb[veb_idx]);
9262 }
9263 
9264 /**
9265  * i40e_veb_clear - remove veb struct
9266  * @veb: the veb to remove
9267  **/
9268 static void i40e_veb_clear(struct i40e_veb *veb)
9269 {
9270 	if (!veb)
9271 		return;
9272 
9273 	if (veb->pf) {
9274 		struct i40e_pf *pf = veb->pf;
9275 
9276 		mutex_lock(&pf->switch_mutex);
9277 		if (pf->veb[veb->idx] == veb)
9278 			pf->veb[veb->idx] = NULL;
9279 		mutex_unlock(&pf->switch_mutex);
9280 	}
9281 
9282 	kfree(veb);
9283 }
9284 
9285 /**
9286  * i40e_veb_release - Delete a VEB and free its resources
9287  * @veb: the VEB being removed
9288  **/
9289 void i40e_veb_release(struct i40e_veb *veb)
9290 {
9291 	struct i40e_vsi *vsi = NULL;
9292 	struct i40e_pf *pf;
9293 	int i, n = 0;
9294 
9295 	pf = veb->pf;
9296 
9297 	/* find the remaining VSI and check for extras */
9298 	for (i = 0; i < pf->num_alloc_vsi; i++) {
9299 		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
9300 			n++;
9301 			vsi = pf->vsi[i];
9302 		}
9303 	}
9304 	if (n != 1) {
9305 		dev_info(&pf->pdev->dev,
9306 			 "can't remove VEB %d with %d VSIs left\n",
9307 			 veb->seid, n);
9308 		return;
9309 	}
9310 
9311 	/* move the remaining VSI to uplink veb */
9312 	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
9313 	if (veb->uplink_seid) {
9314 		vsi->uplink_seid = veb->uplink_seid;
9315 		if (veb->uplink_seid == pf->mac_seid)
9316 			vsi->veb_idx = I40E_NO_VEB;
9317 		else
9318 			vsi->veb_idx = veb->veb_idx;
9319 	} else {
9320 		/* floating VEB */
9321 		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9322 		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
9323 	}
9324 
9325 	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9326 	i40e_veb_clear(veb);
9327 }
9328 
9329 /**
9330  * i40e_add_veb - create the VEB in the switch
9331  * @veb: the VEB to be instantiated
9332  * @vsi: the controlling VSI
9333  **/
9334 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
9335 {
9336 	struct i40e_pf *pf = veb->pf;
9337 	bool is_default = veb->pf->cur_promisc;
9338 	bool is_cloud = false;
9339 	int ret;
9340 
9341 	/* get a VEB from the hardware */
9342 	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
9343 			      veb->enabled_tc, is_default,
9344 			      is_cloud, &veb->seid, NULL);
9345 	if (ret) {
9346 		dev_info(&pf->pdev->dev,
9347 			 "couldn't add VEB, err %s aq_err %s\n",
9348 			 i40e_stat_str(&pf->hw, ret),
9349 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9350 		return -EPERM;
9351 	}
9352 
9353 	/* get statistics counter */
9354 	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
9355 					 &veb->stats_idx, NULL, NULL, NULL);
9356 	if (ret) {
9357 		dev_info(&pf->pdev->dev,
9358 			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
9359 			 i40e_stat_str(&pf->hw, ret),
9360 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9361 		return -EPERM;
9362 	}
9363 	ret = i40e_veb_get_bw_info(veb);
9364 	if (ret) {
9365 		dev_info(&pf->pdev->dev,
9366 			 "couldn't get VEB bw info, err %s aq_err %s\n",
9367 			 i40e_stat_str(&pf->hw, ret),
9368 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9369 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9370 		return -ENOENT;
9371 	}
9372 
9373 	vsi->uplink_seid = veb->seid;
9374 	vsi->veb_idx = veb->idx;
9375 	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9376 
9377 	return 0;
9378 }
9379 
9380 /**
9381  * i40e_veb_setup - Set up a VEB
9382  * @pf: board private structure
9383  * @flags: VEB setup flags
9384  * @uplink_seid: the switch element to link to
9385  * @vsi_seid: the initial VSI seid
9386  * @enabled_tc: Enabled TC bit-map
9387  *
9388  * This allocates the sw VEB structure and links it into the switch
9389  * It is possible and legal for this to be a duplicate of an already
9390  * existing VEB.  It is also possible for both uplink and vsi seids
9391  * to be zero, in order to create a floating VEB.
9392  *
9393  * Returns pointer to the successfully allocated VEB sw struct on
9394  * success, otherwise returns NULL on failure.
9395  **/
9396 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
9397 				u16 uplink_seid, u16 vsi_seid,
9398 				u8 enabled_tc)
9399 {
9400 	struct i40e_veb *veb, *uplink_veb = NULL;
9401 	int vsi_idx, veb_idx;
9402 	int ret;
9403 
9404 	/* if one seid is 0, the other must be 0 to create a floating relay */
9405 	if ((uplink_seid == 0 || vsi_seid == 0) &&
9406 	    (uplink_seid + vsi_seid != 0)) {
9407 		dev_info(&pf->pdev->dev,
			 "only one of the SEIDs is 0: uplink=%d vsi=%d\n",
9409 			 uplink_seid, vsi_seid);
9410 		return NULL;
9411 	}
9412 
9413 	/* make sure there is such a vsi and uplink */
9414 	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
9415 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
9416 			break;
9417 	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
9418 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
9419 			 vsi_seid);
9420 		return NULL;
9421 	}
9422 
9423 	if (uplink_seid && uplink_seid != pf->mac_seid) {
9424 		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9425 			if (pf->veb[veb_idx] &&
9426 			    pf->veb[veb_idx]->seid == uplink_seid) {
9427 				uplink_veb = pf->veb[veb_idx];
9428 				break;
9429 			}
9430 		}
9431 		if (!uplink_veb) {
9432 			dev_info(&pf->pdev->dev,
9433 				 "uplink seid %d not found\n", uplink_seid);
9434 			return NULL;
9435 		}
9436 	}
9437 
9438 	/* get veb sw struct */
9439 	veb_idx = i40e_veb_mem_alloc(pf);
9440 	if (veb_idx < 0)
9441 		goto err_alloc;
9442 	veb = pf->veb[veb_idx];
9443 	veb->flags = flags;
9444 	veb->uplink_seid = uplink_seid;
9445 	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
9446 	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
9447 
9448 	/* create the VEB in the switch */
9449 	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
9450 	if (ret)
9451 		goto err_veb;
9452 	if (vsi_idx == pf->lan_vsi)
9453 		pf->lan_veb = veb->idx;
9454 
9455 	return veb;
9456 
9457 err_veb:
9458 	i40e_veb_clear(veb);
9459 err_alloc:
9460 	return NULL;
9461 }
9462 
9463 /**
9464  * i40e_setup_pf_switch_element - set PF vars based on switch type
9465  * @pf: board private structure
9466  * @ele: element we are building info from
9467  * @num_reported: total number of elements
9468  * @printconfig: should we print the contents
9469  *
9470  * helper function to assist in extracting a few useful SEID values.
9471  **/
9472 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
9473 				struct i40e_aqc_switch_config_element_resp *ele,
9474 				u16 num_reported, bool printconfig)
9475 {
9476 	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
9477 	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
9478 	u8 element_type = ele->element_type;
9479 	u16 seid = le16_to_cpu(ele->seid);
9480 
9481 	if (printconfig)
9482 		dev_info(&pf->pdev->dev,
9483 			 "type=%d seid=%d uplink=%d downlink=%d\n",
9484 			 element_type, seid, uplink_seid, downlink_seid);
9485 
9486 	switch (element_type) {
9487 	case I40E_SWITCH_ELEMENT_TYPE_MAC:
9488 		pf->mac_seid = seid;
9489 		break;
9490 	case I40E_SWITCH_ELEMENT_TYPE_VEB:
9491 		/* Main VEB? */
9492 		if (uplink_seid != pf->mac_seid)
9493 			break;
9494 		if (pf->lan_veb == I40E_NO_VEB) {
9495 			int v;
9496 
9497 			/* find existing or else empty VEB */
9498 			for (v = 0; v < I40E_MAX_VEB; v++) {
9499 				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
9500 					pf->lan_veb = v;
9501 					break;
9502 				}
9503 			}
9504 			if (pf->lan_veb == I40E_NO_VEB) {
9505 				v = i40e_veb_mem_alloc(pf);
9506 				if (v < 0)
9507 					break;
9508 				pf->lan_veb = v;
9509 			}
9510 		}
9511 
9512 		pf->veb[pf->lan_veb]->seid = seid;
9513 		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
9514 		pf->veb[pf->lan_veb]->pf = pf;
9515 		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
9516 		break;
9517 	case I40E_SWITCH_ELEMENT_TYPE_VSI:
9518 		if (num_reported != 1)
9519 			break;
9520 		/* This is immediately after a reset so we can assume this is
9521 		 * the PF's VSI
9522 		 */
9523 		pf->mac_seid = uplink_seid;
9524 		pf->pf_seid = downlink_seid;
9525 		pf->main_vsi_seid = seid;
9526 		if (printconfig)
9527 			dev_info(&pf->pdev->dev,
9528 				 "pf_seid=%d main_vsi_seid=%d\n",
9529 				 pf->pf_seid, pf->main_vsi_seid);
9530 		break;
9531 	case I40E_SWITCH_ELEMENT_TYPE_PF:
9532 	case I40E_SWITCH_ELEMENT_TYPE_VF:
9533 	case I40E_SWITCH_ELEMENT_TYPE_EMP:
9534 	case I40E_SWITCH_ELEMENT_TYPE_BMC:
9535 	case I40E_SWITCH_ELEMENT_TYPE_PE:
9536 	case I40E_SWITCH_ELEMENT_TYPE_PA:
9537 		/* ignore these for now */
9538 		break;
9539 	default:
9540 		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
9541 			 element_type, seid);
9542 		break;
9543 	}
9544 }
9545 
9546 /**
9547  * i40e_fetch_switch_configuration - Get switch config from firmware
9548  * @pf: board private structure
9549  * @printconfig: should we print the contents
9550  *
9551  * Get the current switch configuration from the device and
9552  * extract a few useful SEID values.
9553  **/
9554 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
9555 {
9556 	struct i40e_aqc_get_switch_config_resp *sw_config;
9557 	u16 next_seid = 0;
9558 	int ret = 0;
9559 	u8 *aq_buf;
9560 	int i;
9561 
9562 	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
9563 	if (!aq_buf)
9564 		return -ENOMEM;
9565 
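	/* The AQ returns the switch configuration one buffer at a time;
	 * next_seid is both the resume cookie passed back into the next
	 * request and the loop terminator (0 means no more elements).
	 */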
9566 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
9567 	do {
9568 		u16 num_reported, num_total;
9569 
9570 		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
9571 						I40E_AQ_LARGE_BUF,
9572 						&next_seid, NULL);
9573 		if (ret) {
9574 			dev_info(&pf->pdev->dev,
9575 				 "get switch config failed err %s aq_err %s\n",
9576 				 i40e_stat_str(&pf->hw, ret),
9577 				 i40e_aq_str(&pf->hw,
9578 					     pf->hw.aq.asq_last_status));
9579 			kfree(aq_buf);
9580 			return -ENOENT;
9581 		}
9582 
9583 		num_reported = le16_to_cpu(sw_config->header.num_reported);
9584 		num_total = le16_to_cpu(sw_config->header.num_total);
9585 
9586 		if (printconfig)
9587 			dev_info(&pf->pdev->dev,
9588 				 "header: %d reported %d total\n",
9589 				 num_reported, num_total);
9590 
9591 		for (i = 0; i < num_reported; i++) {
9592 			struct i40e_aqc_switch_config_element_resp *ele =
9593 				&sw_config->element[i];
9594 
9595 			i40e_setup_pf_switch_element(pf, ele, num_reported,
9596 						     printconfig);
9597 		}
9598 	} while (next_seid != 0);
9599 
9600 	kfree(aq_buf);
9601 	return ret;
9602 }
9603 
9604 /**
9605  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
9606  * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
9608  *
9609  * Returns 0 on success, negative value on failure
9610  **/
9611 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
9612 {
9613 	int ret;
9614 
9615 	/* find out what's out there already */
9616 	ret = i40e_fetch_switch_configuration(pf, false);
9617 	if (ret) {
9618 		dev_info(&pf->pdev->dev,
9619 			 "couldn't fetch switch config, err %s aq_err %s\n",
9620 			 i40e_stat_str(&pf->hw, ret),
9621 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9622 		return ret;
9623 	}
9624 	i40e_pf_reset_stats(pf);
9625 
9626 	/* first time setup */
9627 	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
9628 		struct i40e_vsi *vsi = NULL;
9629 		u16 uplink_seid;
9630 
9631 		/* Set up the PF VSI associated with the PF's main VSI
9632 		 * that is already in the HW switch
9633 		 */
9634 		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
9635 			uplink_seid = pf->veb[pf->lan_veb]->seid;
9636 		else
9637 			uplink_seid = pf->mac_seid;
9638 		if (pf->lan_vsi == I40E_NO_VSI)
9639 			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
9640 		else if (reinit)
9641 			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
9642 		if (!vsi) {
9643 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
9644 			i40e_fdir_teardown(pf);
9645 			return -EAGAIN;
9646 		}
9647 	} else {
9648 		/* force a reset of TC and queue layout configurations */
9649 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9650 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9651 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9652 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9653 	}
9654 	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
9655 
9656 	i40e_fdir_sb_setup(pf);
9657 
9658 	/* Setup static PF queue filter control settings */
9659 	ret = i40e_setup_pf_filter_control(pf);
9660 	if (ret) {
9661 		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
9662 			 ret);
9663 		/* Failure here should not stop continuing other steps */
9664 	}
9665 
9666 	/* enable RSS in the HW, even for only one queue, as the stack can use
9667 	 * the hash
9668 	 */
9669 	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
9670 		i40e_config_rss(pf);
9671 
9672 	/* fill in link information and enable LSE reporting */
9673 	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
9674 	i40e_link_event(pf);
9675 
9676 	/* Initialize user-specific link properties */
9677 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
9678 				  I40E_AQ_AN_COMPLETED) ? true : false);
9679 
9680 	i40e_ptp_init(pf);
9681 
9682 	return ret;
9683 }
9684 
9685 /**
9686  * i40e_determine_queue_usage - Work out queue distribution
9687  * @pf: board private structure
9688  **/
9689 static void i40e_determine_queue_usage(struct i40e_pf *pf)
9690 {
9691 	int queues_left;
9692 
9693 	pf->num_lan_qps = 0;
9694 #ifdef I40E_FCOE
9695 	pf->num_fcoe_qps = 0;
9696 #endif
9697 
9698 	/* Find the max queues to be put into basic use.  We'll always be
9699 	 * using TC0, whether or not DCB is running, and TC0 will get the
9700 	 * big RSS set.
9701 	 */
9702 	queues_left = pf->hw.func_caps.num_tx_qp;
9703 
9704 	if ((queues_left == 1) ||
9705 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
9706 		/* one qp for PF, no queues for anything else */
9707 		queues_left = 0;
9708 		pf->rss_size = pf->num_lan_qps = 1;
9709 
9710 		/* make sure all the fancies are disabled */
9711 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
9712 #ifdef I40E_FCOE
9713 			       I40E_FLAG_FCOE_ENABLED	|
9714 #endif
9715 			       I40E_FLAG_FD_SB_ENABLED	|
9716 			       I40E_FLAG_FD_ATR_ENABLED	|
9717 			       I40E_FLAG_DCB_CAPABLE	|
9718 			       I40E_FLAG_SRIOV_ENABLED	|
9719 			       I40E_FLAG_VMDQ_ENABLED);
9720 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
9721 				  I40E_FLAG_FD_SB_ENABLED |
9722 				  I40E_FLAG_FD_ATR_ENABLED |
9723 				  I40E_FLAG_DCB_CAPABLE))) {
9724 		/* one qp for PF */
9725 		pf->rss_size = pf->num_lan_qps = 1;
9726 		queues_left -= pf->num_lan_qps;
9727 
9728 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
9729 #ifdef I40E_FCOE
9730 			       I40E_FLAG_FCOE_ENABLED	|
9731 #endif
9732 			       I40E_FLAG_FD_SB_ENABLED	|
9733 			       I40E_FLAG_FD_ATR_ENABLED	|
9734 			       I40E_FLAG_DCB_ENABLED	|
9735 			       I40E_FLAG_VMDQ_ENABLED);
9736 	} else {
9737 		/* Not enough queues for all TCs */
9738 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
9739 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
9740 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9741 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
9742 		}
9743 		pf->num_lan_qps = max_t(int, pf->rss_size_max,
9744 					num_online_cpus());
9745 		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
9746 					pf->hw.func_caps.num_tx_qp);
9747 
9748 		queues_left -= pf->num_lan_qps;
9749 	}
9750 
9751 #ifdef I40E_FCOE
9752 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9753 		if (I40E_DEFAULT_FCOE <= queues_left) {
9754 			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9755 		} else if (I40E_MINIMUM_FCOE <= queues_left) {
9756 			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9757 		} else {
9758 			pf->num_fcoe_qps = 0;
9759 			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9760 			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9761 		}
9762 
9763 		queues_left -= pf->num_fcoe_qps;
9764 	}
9765 
9766 #endif
9767 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9768 		if (queues_left > 1) {
9769 			queues_left -= 1; /* save 1 queue for FD */
9770 		} else {
9771 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9772 			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9773 		}
9774 	}
9775 
9776 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9777 	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
9778 		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9779 					(queues_left / pf->num_vf_qps));
9780 		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9781 	}
9782 
9783 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9784 	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9785 		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9786 					  (queues_left / pf->num_vmdq_qps));
9787 		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9788 	}
9789 
9790 	pf->queues_left = queues_left;
9791 #ifdef I40E_FCOE
9792 	dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9793 #endif
9794 }
9795 
9796 /**
9797  * i40e_setup_pf_filter_control - Setup PF static filter control
9798  * @pf: PF to be setup
9799  *
9800  * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per-PF
 * filter sizes required for them. It also enables the Flow Director,
 * ethertype and macvlan type filter settings for the PF.
9804  *
9805  * Returns 0 on success, negative on failure
9806  **/
9807 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9808 {
9809 	struct i40e_filter_control_settings *settings = &pf->filter_settings;
9810 
9811 	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9812 
9813 	/* Flow Director is enabled */
9814 	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9815 		settings->enable_fdir = true;
9816 
9817 	/* Ethtype and MACVLAN filters enabled for PF */
9818 	settings->enable_ethtype = true;
9819 	settings->enable_macvlan = true;
9820 
9821 	if (i40e_set_filter_control(&pf->hw, settings))
9822 		return -ENOENT;
9823 
9824 	return 0;
9825 }
9826 
9827 #define INFO_STRING_LEN 255
9828 static void i40e_print_features(struct i40e_pf *pf)
9829 {
9830 	struct i40e_hw *hw = &pf->hw;
9831 	char *buf, *string;
9832 
9833 	string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9834 	if (!string) {
9835 		dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9836 		return;
9837 	}
9838 
9839 	buf = string;
9840 
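	/* build the feature summary in one pass; each sprintf() returns the
	 * number of characters written, so buf always points at the current
	 * end of the string
	 */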
9841 	buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
9842 #ifdef CONFIG_PCI_IOV
9843 	buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9844 #endif
9845 	buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
9846 		       pf->hw.func_caps.num_vsis,
9847 		       pf->vsi[pf->lan_vsi]->num_queue_pairs,
9848 		       pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
9849 
9850 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
9851 		buf += sprintf(buf, "RSS ");
9852 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9853 		buf += sprintf(buf, "FD_ATR ");
9854 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9855 		buf += sprintf(buf, "FD_SB ");
9856 		buf += sprintf(buf, "NTUPLE ");
9857 	}
9858 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9859 		buf += sprintf(buf, "DCB ");
9860 	if (pf->flags & I40E_FLAG_PTP)
9861 		buf += sprintf(buf, "PTP ");
9862 #ifdef I40E_FCOE
9863 	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9864 		buf += sprintf(buf, "FCOE ");
9865 #endif
9866 
9867 	BUG_ON(buf > (string + INFO_STRING_LEN));
9868 	dev_info(&pf->pdev->dev, "%s\n", string);
9869 	kfree(string);
9870 }
9871 
9872 /**
9873  * i40e_probe - Device initialization routine
9874  * @pdev: PCI device information struct
9875  * @ent: entry in i40e_pci_tbl
9876  *
9877  * i40e_probe initializes a PF identified by a pci_dev structure.
9878  * The OS initialization, configuring of the PF private structure,
9879  * and a hardware reset occur.
9880  *
9881  * Returns 0 on success, negative on failure
9882  **/
9883 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9884 {
9885 	struct i40e_aq_get_phy_abilities_resp abilities;
9886 	unsigned long ioremap_len;
9887 	struct i40e_pf *pf;
9888 	struct i40e_hw *hw;
9889 	static u16 pfs_found;
9890 	u16 link_status;
9891 	int err = 0;
9892 	u32 len;
9893 	u32 i;
9894 
9895 	err = pci_enable_device_mem(pdev);
9896 	if (err)
9897 		return err;
9898 
9899 	/* set up for high or low dma */
9900 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9901 	if (err) {
9902 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9903 		if (err) {
9904 			dev_err(&pdev->dev,
9905 				"DMA configuration failed: 0x%x\n", err);
9906 			goto err_dma;
9907 		}
9908 	}
9909 
9910 	/* set up pci connections */
9911 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9912 					   IORESOURCE_MEM), i40e_driver_name);
9913 	if (err) {
9914 		dev_info(&pdev->dev,
9915 			 "pci_request_selected_regions failed %d\n", err);
9916 		goto err_pci_reg;
9917 	}
9918 
9919 	pci_enable_pcie_error_reporting(pdev);
9920 	pci_set_master(pdev);
9921 
9922 	/* Now that we have a PCI connection, we need to do the
9923 	 * low level device setup.  This is primarily setting up
9924 	 * the Admin Queue structures and then querying for the
9925 	 * device's current profile information.
9926 	 */
9927 	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9928 	if (!pf) {
9929 		err = -ENOMEM;
9930 		goto err_pf_alloc;
9931 	}
9932 	pf->next_vsi = 0;
9933 	pf->pdev = pdev;
9934 	set_bit(__I40E_DOWN, &pf->state);
9935 
9936 	hw = &pf->hw;
9937 	hw->back = pf;
9938 
9939 	ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
9940 			    I40E_MAX_CSR_SPACE);
9941 
9942 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
9943 	if (!hw->hw_addr) {
9944 		err = -EIO;
9945 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
9946 			 (unsigned int)pci_resource_start(pdev, 0),
9947 			 (unsigned int)pci_resource_len(pdev, 0), err);
9948 		goto err_ioremap;
9949 	}
9950 	hw->vendor_id = pdev->vendor;
9951 	hw->device_id = pdev->device;
9952 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
9953 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
9954 	hw->subsystem_device_id = pdev->subsystem_device;
9955 	hw->bus.device = PCI_SLOT(pdev->devfn);
9956 	hw->bus.func = PCI_FUNC(pdev->devfn);
9957 	pf->instance = pfs_found;
9958 
9959 	if (debug != -1) {
9960 		pf->msg_enable = pf->hw.debug_mask;
9961 		pf->msg_enable = debug;
9962 	}
9963 
9964 	/* do a special CORER for clearing PXE mode once at init */
9965 	if (hw->revision_id == 0 &&
9966 	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9967 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9968 		i40e_flush(hw);
9969 		msleep(200);
9970 		pf->corer_count++;
9971 
9972 		i40e_clear_pxe_mode(hw);
9973 	}
9974 
9975 	/* Reset here to make sure all is clean and to define PF 'n' */
9976 	i40e_clear_hw(hw);
9977 	err = i40e_pf_reset(hw);
9978 	if (err) {
9979 		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9980 		goto err_pf_reset;
9981 	}
9982 	pf->pfr_count++;
9983 
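	/* size the admin send/receive queues and their buffers before the
	 * admin queue is brought up in i40e_init_adminq() below
	 */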
9984 	hw->aq.num_arq_entries = I40E_AQ_LEN;
9985 	hw->aq.num_asq_entries = I40E_AQ_LEN;
9986 	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9987 	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9988 	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9989 
9990 	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9991 		 "%s-%s:misc",
9992 		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9993 
9994 	err = i40e_init_shared_code(hw);
9995 	if (err) {
9996 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
9997 			 err);
9998 		goto err_pf_reset;
9999 	}
10000 
10001 	/* set up a default setting for link flow control */
10002 	pf->hw.fc.requested_mode = I40E_FC_NONE;
10003 
10004 	err = i40e_init_adminq(hw);
10005 	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
10006 	if (err) {
10007 		dev_info(&pdev->dev,
10008 			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10009 		goto err_pf_reset;
10010 	}
10011 
10012 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10013 	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10014 		dev_info(&pdev->dev,
10015 			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10016 	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10017 		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10018 		dev_info(&pdev->dev,
10019 			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10020 
10021 	i40e_verify_eeprom(pf);
10022 
10023 	/* Rev 0 hardware was never productized */
10024 	if (hw->revision_id < 1)
10025 		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10026 
10027 	i40e_clear_pxe_mode(hw);
10028 	err = i40e_get_capabilities(pf);
10029 	if (err)
10030 		goto err_adminq_setup;
10031 
10032 	err = i40e_sw_init(pf);
10033 	if (err) {
10034 		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10035 		goto err_sw_init;
10036 	}
10037 
10038 	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10039 				hw->func_caps.num_rx_qp,
10040 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10041 	if (err) {
10042 		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10043 		goto err_init_lan_hmc;
10044 	}
10045 
10046 	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10047 	if (err) {
10048 		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10049 		err = -ENOENT;
10050 		goto err_configure_lan_hmc;
10051 	}
10052 
10053 	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore the error return code because this will fail if LLDP was
	 * already disabled via the hardware settings.
	 */
10057 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
10058 	    (pf->hw.aq.fw_maj_ver < 4)) {
10059 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10060 		i40e_aq_stop_lldp(hw, true, NULL);
10061 	}
10062 
10063 	i40e_get_mac_addr(hw, hw->mac.addr);
10064 	if (!is_valid_ether_addr(hw->mac.addr)) {
10065 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10066 		err = -EIO;
10067 		goto err_mac_addr;
10068 	}
10069 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10070 	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10071 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10072 	if (is_valid_ether_addr(hw->mac.port_addr))
10073 		pf->flags |= I40E_FLAG_PORT_ID_VALID;
10074 #ifdef I40E_FCOE
10075 	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10076 	if (err)
10077 		dev_info(&pdev->dev,
10078 			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10079 	if (!is_valid_ether_addr(hw->mac.san_addr)) {
10080 		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10081 			 hw->mac.san_addr);
10082 		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10083 	}
10084 	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10085 #endif /* I40E_FCOE */
10086 
10087 	pci_set_drvdata(pdev, pf);
10088 	pci_save_state(pdev);
10089 #ifdef CONFIG_I40E_DCB
10090 	err = i40e_init_pf_dcb(pf);
10091 	if (err) {
10092 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10093 		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10094 		/* Continue without DCB enabled */
10095 	}
10096 #endif /* CONFIG_I40E_DCB */
10097 
10098 	/* set up periodic task facility */
10099 	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10100 	pf->service_timer_period = HZ;
10101 
10102 	INIT_WORK(&pf->service_task, i40e_service_task);
10103 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10104 	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10105 	pf->link_check_timeout = jiffies;
10106 
10107 	/* WoL defaults to disabled */
10108 	pf->wol_en = false;
10109 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10110 
10111 	/* set up the main switch operations */
10112 	i40e_determine_queue_usage(pf);
10113 	err = i40e_init_interrupt_scheme(pf);
10114 	if (err)
10115 		goto err_switch_setup;
10116 
10117 	/* The number of VSIs reported by the FW is the minimum guaranteed
10118 	 * to us; HW supports far more and we share the remaining pool with
10119 	 * the other PFs. We allocate space for more than the guarantee with
10120 	 * the understanding that we might not get them all later.
10121 	 */
10122 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10123 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10124 	else
10125 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10126 
10127 	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10128 	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
10129 	pf->vsi = kzalloc(len, GFP_KERNEL);
10130 	if (!pf->vsi) {
10131 		err = -ENOMEM;
10132 		goto err_switch_setup;
10133 	}
10134 
10135 #ifdef CONFIG_PCI_IOV
10136 	/* prep for VF support */
10137 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10138 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10139 	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10140 		if (pci_num_vf(pdev))
10141 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10142 	}
10143 #endif
10144 	err = i40e_setup_pf_switch(pf, false);
10145 	if (err) {
10146 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10147 		goto err_vsis;
10148 	}
10149 	/* if FDIR VSI was set up, start it now */
10150 	for (i = 0; i < pf->num_alloc_vsi; i++) {
10151 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10152 			i40e_vsi_open(pf->vsi[i]);
10153 			break;
10154 		}
10155 	}
10156 
10157 	/* driver is only interested in link up/down and module qualification
10158 	 * reports from firmware
10159 	 */
10160 	err = i40e_aq_set_phy_int_mask(&pf->hw,
10161 				       I40E_AQ_EVENT_LINK_UPDOWN |
10162 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
10163 	if (err)
10164 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10165 			 i40e_stat_str(&pf->hw, err),
10166 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10167 
10168 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
10169 	    (pf->hw.aq.fw_maj_ver < 4)) {
10170 		msleep(75);
10171 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10172 		if (err)
10173 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10174 				 i40e_stat_str(&pf->hw, err),
10175 				 i40e_aq_str(&pf->hw,
10176 					     pf->hw.aq.asq_last_status));
10177 	}
10178 	/* The main driver is (mostly) up and happy. We need to set this state
10179 	 * before setting up the misc vector or we get a race and the vector
10180 	 * ends up disabled forever.
10181 	 */
10182 	clear_bit(__I40E_DOWN, &pf->state);
10183 
10184 	/* In case of MSIX we are going to setup the misc vector right here
10185 	 * to handle admin queue events etc. In case of legacy and MSI
10186 	 * the misc functionality and queue processing is combined in
10187 	 * the same vector and that gets setup at open.
10188 	 */
10189 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10190 		err = i40e_setup_misc_vector(pf);
10191 		if (err) {
10192 			dev_info(&pdev->dev,
10193 				 "setup of misc vector failed: %d\n", err);
10194 			goto err_vsis;
10195 		}
10196 	}
10197 
10198 #ifdef CONFIG_PCI_IOV
10199 	/* prep for VF support */
10200 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10201 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10202 	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10203 		u32 val;
10204 
10205 		/* disable link interrupts for VFs */
10206 		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
10207 		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
10208 		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
10209 		i40e_flush(hw);
10210 
10211 		if (pci_num_vf(pdev)) {
10212 			dev_info(&pdev->dev,
10213 				 "Active VFs found, allocating resources.\n");
10214 			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
10215 			if (err)
10216 				dev_info(&pdev->dev,
10217 					 "Error %d allocating resources for existing VFs\n",
10218 					 err);
10219 		}
10220 	}
10221 #endif /* CONFIG_PCI_IOV */
10222 
10223 	pfs_found++;
10224 
10225 	i40e_dbg_pf_init(pf);
10226 
10227 	/* tell the firmware that we're starting */
10228 	i40e_send_version(pf);
10229 
10230 	/* since everything's happy, start the service_task timer */
10231 	mod_timer(&pf->service_timer,
10232 		  round_jiffies(jiffies + pf->service_timer_period));
10233 
10234 #ifdef I40E_FCOE
10235 	/* create FCoE interface */
10236 	i40e_fcoe_vsi_setup(pf);
10237 
10238 #endif
10239 	/* Get the negotiated link width and speed from PCI config space */
10240 	pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
10241 
10242 	i40e_set_pci_config_data(hw, link_status);
10243 
10244 	dev_info(&pdev->dev, "PCI-Express: %s %s\n",
10245 		(hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
10246 		 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
10247 		 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
10248 		 "Unknown"),
10249 		(hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
10250 		 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
10251 		 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
10252 		 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
10253 		 "Unknown"));
10254 
10255 	if (hw->bus.width < i40e_bus_width_pcie_x8 ||
10256 	    hw->bus.speed < i40e_bus_speed_8000) {
10257 		dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
10258 		dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
10259 	}
10260 
10261 	/* get the requested speeds from the fw */
10262 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
10263 	if (err)
10264 		dev_info(&pf->pdev->dev,
10265 			 "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
10266 			 i40e_stat_str(&pf->hw, err),
10267 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10268 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
10269 
10270 	/* print a string summarizing features */
10271 	i40e_print_features(pf);
10272 
10273 	return 0;
10274 
10275 	/* Unwind what we've done if something failed in the setup */
10276 err_vsis:
10277 	set_bit(__I40E_DOWN, &pf->state);
10278 	i40e_clear_interrupt_scheme(pf);
10279 	kfree(pf->vsi);
10280 err_switch_setup:
10281 	i40e_reset_interrupt_capability(pf);
10282 	del_timer_sync(&pf->service_timer);
10283 err_mac_addr:
10284 err_configure_lan_hmc:
10285 	(void)i40e_shutdown_lan_hmc(hw);
10286 err_init_lan_hmc:
10287 	kfree(pf->qp_pile);
10288 err_sw_init:
10289 err_adminq_setup:
10290 	(void)i40e_shutdown_adminq(hw);
10291 err_pf_reset:
10292 	iounmap(hw->hw_addr);
10293 err_ioremap:
10294 	kfree(pf);
10295 err_pf_alloc:
10296 	pci_disable_pcie_error_reporting(pdev);
10297 	pci_release_selected_regions(pdev,
10298 				     pci_select_bars(pdev, IORESOURCE_MEM));
10299 err_pci_reg:
10300 err_dma:
10301 	pci_disable_device(pdev);
10302 	return err;
10303 }
10304 
10305 /**
10306  * i40e_remove - Device removal routine
10307  * @pdev: PCI device information struct
10308  *
10309  * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
10311  * Hot-Plug event, or because the driver is going to be removed from
10312  * memory.
10313  **/
10314 static void i40e_remove(struct pci_dev *pdev)
10315 {
10316 	struct i40e_pf *pf = pci_get_drvdata(pdev);
10317 	i40e_status ret_code;
10318 	int i;
10319 
10320 	i40e_dbg_pf_exit(pf);
10321 
10322 	i40e_ptp_stop(pf);
10323 
10324 	/* no more scheduling of any task */
10325 	set_bit(__I40E_DOWN, &pf->state);
10326 	del_timer_sync(&pf->service_timer);
10327 	cancel_work_sync(&pf->service_task);
10328 	i40e_fdir_teardown(pf);
10329 
10330 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
10331 		i40e_free_vfs(pf);
10332 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
10333 	}
10334 
10335 	i40e_fdir_teardown(pf);
10336 
10337 	/* If there is a switch structure or any orphans, remove them.
10338 	 * This will leave only the PF's VSI remaining.
10339 	 */
10340 	for (i = 0; i < I40E_MAX_VEB; i++) {
10341 		if (!pf->veb[i])
10342 			continue;
10343 
10344 		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
10345 		    pf->veb[i]->uplink_seid == 0)
10346 			i40e_switch_branch_release(pf->veb[i]);
10347 	}
10348 
10349 	/* Now we can shutdown the PF's VSI, just before we kill
10350 	 * adminq and hmc.
10351 	 */
10352 	if (pf->vsi[pf->lan_vsi])
10353 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
10354 
10355 	/* shutdown and destroy the HMC */
10356 	if (pf->hw.hmc.hmc_obj) {
10357 		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
10358 		if (ret_code)
10359 			dev_warn(&pdev->dev,
10360 				 "Failed to destroy the HMC resources: %d\n",
10361 				 ret_code);
10362 	}
10363 
10364 	/* shutdown the adminq */
10365 	ret_code = i40e_shutdown_adminq(&pf->hw);
10366 	if (ret_code)
10367 		dev_warn(&pdev->dev,
10368 			 "Failed to destroy the Admin Queue resources: %d\n",
10369 			 ret_code);
10370 
10371 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10372 	i40e_clear_interrupt_scheme(pf);
10373 	for (i = 0; i < pf->num_alloc_vsi; i++) {
10374 		if (pf->vsi[i]) {
10375 			i40e_vsi_clear_rings(pf->vsi[i]);
10376 			i40e_vsi_clear(pf->vsi[i]);
10377 			pf->vsi[i] = NULL;
10378 		}
10379 	}
10380 
10381 	for (i = 0; i < I40E_MAX_VEB; i++) {
10382 		kfree(pf->veb[i]);
10383 		pf->veb[i] = NULL;
10384 	}
10385 
10386 	kfree(pf->qp_pile);
10387 	kfree(pf->vsi);
10388 
10389 	iounmap(pf->hw.hw_addr);
10390 	kfree(pf);
10391 	pci_release_selected_regions(pdev,
10392 				     pci_select_bars(pdev, IORESOURCE_MEM));
10393 
10394 	pci_disable_pcie_error_reporting(pdev);
10395 	pci_disable_device(pdev);
10396 }
10397 
10398 /**
10399  * i40e_pci_error_detected - warning that something funky happened in PCI land
10400  * @pdev: PCI device information struct
10401  *
10402  * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things and be ready for
10404  * remediation.
10405  **/
10406 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
10407 						enum pci_channel_state error)
10408 {
10409 	struct i40e_pf *pf = pci_get_drvdata(pdev);
10410 
10411 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
10412 
10413 	/* shutdown all operations */
10414 	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
10415 		rtnl_lock();
10416 		i40e_prep_for_reset(pf);
10417 		rtnl_unlock();
10418 	}
10419 
10420 	/* Request a slot reset */
10421 	return PCI_ERS_RESULT_NEED_RESET;
10422 }
10423 
10424 /**
10425  * i40e_pci_error_slot_reset - a PCI slot reset just happened
10426  * @pdev: PCI device information struct
10427  *
10428  * Called to find if the driver can work with the device now that
10429  * the pci slot has been reset.  If a basic connection seems good
10430  * (registers are readable and have sane content) then return a
10431  * happy little PCI_ERS_RESULT_xxx.
10432  **/
10433 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
10434 {
10435 	struct i40e_pf *pf = pci_get_drvdata(pdev);
10436 	pci_ers_result_t result;
10437 	int err;
10438 	u32 reg;
10439 
10440 	dev_info(&pdev->dev, "%s\n", __func__);
10441 	if (pci_enable_device_mem(pdev)) {
10442 		dev_info(&pdev->dev,
10443 			 "Cannot re-enable PCI device after reset.\n");
10444 		result = PCI_ERS_RESULT_DISCONNECT;
10445 	} else {
10446 		pci_set_master(pdev);
10447 		pci_restore_state(pdev);
10448 		pci_save_state(pdev);
10449 		pci_wake_from_d3(pdev, false);
10450 
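		/* A zero in the global reset trigger register suggests no
		 * reset is still pending and the device should be usable.
		 */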
10451 		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
10452 		if (reg == 0)
10453 			result = PCI_ERS_RESULT_RECOVERED;
10454 		else
10455 			result = PCI_ERS_RESULT_DISCONNECT;
10456 	}
10457 
10458 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
10459 	if (err) {
10460 		dev_info(&pdev->dev,
10461 			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
10462 			 err);
10463 		/* non-fatal, continue */
10464 	}
10465 
10466 	return result;
10467 }
10468 
10469 /**
10470  * i40e_pci_error_resume - restart operations after PCI error recovery
10471  * @pdev: PCI device information struct
10472  *
10473  * Called to allow the driver to bring things back up after PCI error
10474  * and/or reset recovery has finished.
10475  **/
10476 static void i40e_pci_error_resume(struct pci_dev *pdev)
10477 {
10478 	struct i40e_pf *pf = pci_get_drvdata(pdev);
10479 
10480 	dev_info(&pdev->dev, "%s\n", __func__);
10481 	if (test_bit(__I40E_SUSPENDED, &pf->state))
10482 		return;
10483 
10484 	rtnl_lock();
10485 	i40e_handle_reset_warning(pf);
10486 	rtnl_unlock();
10487 }
10488 
10489 /**
10490  * i40e_shutdown - PCI callback for shutting down
10491  * @pdev: PCI device information struct
10492  **/
10493 static void i40e_shutdown(struct pci_dev *pdev)
10494 {
10495 	struct i40e_pf *pf = pci_get_drvdata(pdev);
10496 	struct i40e_hw *hw = &pf->hw;
10497 
10498 	set_bit(__I40E_SUSPENDED, &pf->state);
10499 	set_bit(__I40E_DOWN, &pf->state);
10500 	rtnl_lock();
10501 	i40e_prep_for_reset(pf);
10502 	rtnl_unlock();
10503 
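	/* arm (or disarm) wake-on-LAN: APM wake enable and the magic-packet
	 * wake-up filter, per the user's WoL setting
	 */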
10504 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10505 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10506 
10507 	del_timer_sync(&pf->service_timer);
10508 	cancel_work_sync(&pf->service_task);
10509 	i40e_fdir_teardown(pf);
10510 
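	/* the service task could have run between the first quiesce and its
	 * cancellation, so prepare for reset and re-arm the WoL registers
	 * once more now that nothing else is scheduled
	 */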
10511 	rtnl_lock();
10512 	i40e_prep_for_reset(pf);
10513 	rtnl_unlock();
10514 
10515 	wr32(hw, I40E_PFPM_APM,
10516 	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10517 	wr32(hw, I40E_PFPM_WUFC,
10518 	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10519 
10520 	i40e_clear_interrupt_scheme(pf);
10521 
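	/* when the system is really powering off, honor the WoL setting and
	 * drop the function into D3hot
	 */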
10522 	if (system_state == SYSTEM_POWER_OFF) {
10523 		pci_wake_from_d3(pdev, pf->wol_en);
10524 		pci_set_power_state(pdev, PCI_D3hot);
10525 	}
10526 }
10527 
10528 #ifdef CONFIG_PM
10529 /**
10530  * i40e_suspend - PCI callback for moving to D3
10531  * @pdev: PCI device information struct
 * @state: PM transition message (not used)
10532  **/
10533 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
10534 {
10535 	struct i40e_pf *pf = pci_get_drvdata(pdev);
10536 	struct i40e_hw *hw = &pf->hw;
10537 
10538 	set_bit(__I40E_SUSPENDED, &pf->state);
10539 	set_bit(__I40E_DOWN, &pf->state);
10540 
10541 	rtnl_lock();
10542 	i40e_prep_for_reset(pf);
10543 	rtnl_unlock();
10544 
10545 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10546 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10547 
10548 	pci_wake_from_d3(pdev, pf->wol_en);
10549 	pci_set_power_state(pdev, PCI_D3hot);
10550 
10551 	return 0;
10552 }
10553 
10554 /**
10555  * i40e_resume - PCI callback for waking up from D3
10556  * @pdev: PCI device information struct
10557  **/
10558 static int i40e_resume(struct pci_dev *pdev)
10559 {
10560 	struct i40e_pf *pf = pci_get_drvdata(pdev);
10561 	int err;
10562 
10563 	pci_set_power_state(pdev, PCI_D0);
10564 	pci_restore_state(pdev);
10565 	/* pci_restore_state() clears dev->state_saved, so
10566 	 * call pci_save_state() again to restore it.
10567 	 */
10568 	pci_save_state(pdev);
10569 
10570 	err = pci_enable_device_mem(pdev);
10571 	if (err) {
10572 		dev_err(&pdev->dev,
10573 			"%s: Cannot enable PCI device from suspend\n",
10574 			__func__);
10575 		return err;
10576 	}
10577 	pci_set_master(pdev);
10578 
10579 	/* no wakeup events while running */
10580 	pci_wake_from_d3(pdev, false);
10581 
10582 	/* handling the reset will rebuild the device state */
10583 	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
10584 		clear_bit(__I40E_DOWN, &pf->state);
10585 		rtnl_lock();
10586 		i40e_reset_and_rebuild(pf, false);
10587 		rtnl_unlock();
10588 	}
10589 
10590 	return 0;
10591 }
10592 
10593 #endif
10594 static const struct pci_error_handlers i40e_err_handler = {
10595 	.error_detected = i40e_pci_error_detected,
10596 	.slot_reset = i40e_pci_error_slot_reset,
10597 	.resume = i40e_pci_error_resume,
10598 };
10599 
10600 static struct pci_driver i40e_driver = {
10601 	.name     = i40e_driver_name,
10602 	.id_table = i40e_pci_tbl,
10603 	.probe    = i40e_probe,
10604 	.remove   = i40e_remove,
10605 #ifdef CONFIG_PM
10606 	.suspend  = i40e_suspend,
10607 	.resume   = i40e_resume,
10608 #endif
10609 	.shutdown = i40e_shutdown,
10610 	.err_handler = &i40e_err_handler,
10611 	.sriov_configure = i40e_pci_sriov_configure,
10612 };
10613 
10614 /**
10615  * i40e_init_module - Driver registration routine
10616  *
10617  * i40e_init_module is the first routine called when the driver is
10618  * loaded. All it does is register with the PCI subsystem.
10619  **/
10620 static int __init i40e_init_module(void)
10621 {
10622 	pr_info("%s: %s - version %s\n", i40e_driver_name,
10623 		i40e_driver_string, i40e_driver_version_str);
10624 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
10625 
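	/* set up the driver's debugfs root before registering with the PCI
	 * core, so each probed PF can add its entries beneath it
	 */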
10626 	i40e_dbg_init();
10627 	return pci_register_driver(&i40e_driver);
10628 }
10629 module_init(i40e_init_module);
10630 
10631 /**
10632  * i40e_exit_module - Driver exit cleanup routine
10633  *
10634  * i40e_exit_module is called just before the driver is removed
10635  * from memory.
10636  **/
10637 static void __exit i40e_exit_module(void)
10638 {
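	/* unregistering runs i40e_remove() for every bound device before the
	 * debugfs root goes away
	 */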
10639 	pci_unregister_driver(&i40e_driver);
10640 	i40e_dbg_exit();
10641 }
10642 module_exit(i40e_exit_module);
10643