1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/of_net.h>
6 #include <linux/pci.h>
7 #include <linux/bpf.h>
8 #include <generated/utsrelease.h>
9 #include <linux/crash_dump.h>
10 
11 /* Local includes */
12 #include "i40e.h"
13 #include "i40e_diag.h"
14 #include "i40e_xsk.h"
15 #include <net/udp_tunnel.h>
16 #include <net/xdp_sock_drv.h>
17 /* All i40e tracepoints are defined by the include below, which
18  * must be included exactly once across the whole kernel with
19  * CREATE_TRACE_POINTS defined
20  */
21 #define CREATE_TRACE_POINTS
22 #include "i40e_trace.h"
23 
24 const char i40e_driver_name[] = "i40e";
25 static const char i40e_driver_string[] =
26 			"Intel(R) Ethernet Connection XL710 Network Driver";
27 
28 static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
29 
30 /* a few forward declarations */
31 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
32 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
33 static int i40e_add_vsi(struct i40e_vsi *vsi);
34 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
35 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
36 static int i40e_setup_misc_vector(struct i40e_pf *pf);
37 static void i40e_determine_queue_usage(struct i40e_pf *pf);
38 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
39 static void i40e_prep_for_reset(struct i40e_pf *pf);
40 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
41 				   bool lock_acquired);
42 static int i40e_reset(struct i40e_pf *pf);
43 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
44 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
45 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
46 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
47 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
48 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
49 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
50 static int i40e_get_capabilities(struct i40e_pf *pf,
51 				 enum i40e_admin_queue_opc list_type);
52 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
53 
54 /* i40e_pci_tbl - PCI Device ID Table
55  *
56  * Last entry must be all 0s
57  *
58  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
59  *   Class, Class Mask, private data (not used) }
60  */
61 static const struct pci_device_id i40e_pci_tbl[] = {
62 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
63 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
64 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
65 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
66 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
67 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
68 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
69 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
70 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
71 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
72 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
73 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
74 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
75 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
76 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
77 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
78 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
79 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
80 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
81 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
82 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
83 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
84 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
85 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
86 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
87 	/* required last entry */
88 	{0, }
89 };
90 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
91 
92 #define I40E_MAX_VF_COUNT 128
93 static int debug = -1;
94 module_param(debug, uint, 0);
95 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
96 
97 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
98 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
99 MODULE_LICENSE("GPL v2");
100 
101 static struct workqueue_struct *i40e_wq;
102 
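/**
 * netdev_hw_addr_refcnt - adjust netdev refcount for a MAC filter address
 * @f: the MAC filter whose address is looked up in the netdev MC list
 * @netdev: network interface device structure
 * @delta: value to add to the matching entry's refcount
 *
 * Keeps the netdev multicast address refcount in step with the driver's
 * own filter accounting; the refcount is clamped so it never drops below
 * one from here.
 **/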
103 static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
104 				  struct net_device *netdev, int delta)
105 {
106 	struct netdev_hw_addr *ha;
107 
108 	if (!f || !netdev)
109 		return;
110 
111 	netdev_for_each_mc_addr(ha, netdev) {
112 		if (ether_addr_equal(ha->addr, f->macaddr)) {
113 			ha->refcount += delta;
114 			if (ha->refcount <= 0)
115 				ha->refcount = 1;
116 			break;
117 		}
118 	}
119 }
120 
121 /**
122  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
123  * @hw:   pointer to the HW structure
124  * @mem:  ptr to mem struct to fill out
125  * @size: size of memory requested
126  * @alignment: what to align the allocation to
127  **/
128 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
129 			    u64 size, u32 alignment)
130 {
131 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
132 
133 	mem->size = ALIGN(size, alignment);
134 	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
135 				     GFP_KERNEL);
136 	if (!mem->va)
137 		return -ENOMEM;
138 
139 	return 0;
140 }
141 
142 /**
143  * i40e_free_dma_mem_d - OS specific memory free for shared code
144  * @hw:   pointer to the HW structure
145  * @mem:  ptr to mem struct to free
146  **/
147 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
148 {
149 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
150 
151 	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
152 	mem->va = NULL;
153 	mem->pa = 0;
154 	mem->size = 0;
155 
156 	return 0;
157 }
158 
159 /**
160  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
161  * @hw:   pointer to the HW structure
162  * @mem:  ptr to mem struct to fill out
163  * @size: size of memory requested
164  **/
165 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
166 			     u32 size)
167 {
168 	mem->size = size;
169 	mem->va = kzalloc(size, GFP_KERNEL);
170 
171 	if (!mem->va)
172 		return -ENOMEM;
173 
174 	return 0;
175 }
176 
177 /**
178  * i40e_free_virt_mem_d - OS specific memory free for shared code
179  * @hw:   pointer to the HW structure
180  * @mem:  ptr to mem struct to free
181  **/
182 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
183 {
184 	/* it's ok to kfree a NULL pointer */
185 	kfree(mem->va);
186 	mem->va = NULL;
187 	mem->size = 0;
188 
189 	return 0;
190 }
191 
192 /**
193  * i40e_get_lump - find a lump of free generic resource
194  * @pf: board private structure
195  * @pile: the pile of resource to search
196  * @needed: the number of items needed
197  * @id: an owner id to stick on the items assigned
198  *
199  * Returns the base item index of the lump, or negative for error
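 *
 * Allocated entries in pile->list store the owner id OR'd with
 * I40E_PILE_VALID_BIT (e.g. 5 | I40E_PILE_VALID_BIT for owner id 5);
 * entries with the valid bit clear are free for allocation.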
200  **/
201 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
202 			 u16 needed, u16 id)
203 {
204 	int ret = -ENOMEM;
205 	int i, j;
206 
207 	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
208 		dev_info(&pf->pdev->dev,
209 			 "param err: pile=%s needed=%d id=0x%04x\n",
210 			 pile ? "<valid>" : "<null>", needed, id);
211 		return -EINVAL;
212 	}
213 
214 	/* Allocate last queue in the pile for FDIR VSI queue
215 	 * so it doesn't fragment the qp_pile
216 	 */
217 	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
218 		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
219 			dev_err(&pf->pdev->dev,
220 				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
221 				pile->num_entries - 1);
222 			return -ENOMEM;
223 		}
224 		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
225 		return pile->num_entries - 1;
226 	}
227 
228 	i = 0;
229 	while (i < pile->num_entries) {
230 		/* skip already allocated entries */
231 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
232 			i++;
233 			continue;
234 		}
235 
236 		/* do we have enough in this lump? */
237 		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
238 			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
239 				break;
240 		}
241 
242 		if (j == needed) {
243 			/* there was enough, so assign it to the requestor */
244 			for (j = 0; j < needed; j++)
245 				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
246 			ret = i;
247 			break;
248 		}
249 
250 		/* not enough, so skip over it and continue looking */
251 		i += j;
252 	}
253 
254 	return ret;
255 }
256 
257 /**
258  * i40e_put_lump - return a lump of generic resource
259  * @pile: the pile of resource to search
260  * @index: the base item index
261  * @id: the owner id of the items assigned
262  *
263  * Returns the count of items in the lump, or -EINVAL on a bad parameter
264  **/
265 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
266 {
267 	int valid_id = (id | I40E_PILE_VALID_BIT);
268 	int count = 0;
269 	u16 i;
270 
271 	if (!pile || index >= pile->num_entries)
272 		return -EINVAL;
273 
274 	for (i = index;
275 	     i < pile->num_entries && pile->list[i] == valid_id;
276 	     i++) {
277 		pile->list[i] = 0;
278 		count++;
279 	}
280 
282 	return count;
283 }
284 
285 /**
286  * i40e_find_vsi_from_id - searches for the vsi with the given id
287  * @pf: the pf structure to search for the vsi
288  * @id: id of the vsi it is searching for
289  **/
290 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
291 {
292 	int i;
293 
294 	for (i = 0; i < pf->num_alloc_vsi; i++)
295 		if (pf->vsi[i] && (pf->vsi[i]->id == id))
296 			return pf->vsi[i];
297 
298 	return NULL;
299 }
300 
301 /**
302  * i40e_service_event_schedule - Schedule the service task to wake up
303  * @pf: board private structure
304  *
305  * If not already scheduled, this puts the task into the work queue
306  **/
307 void i40e_service_event_schedule(struct i40e_pf *pf)
308 {
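	/* Queue the task while the PF is operational (neither down nor in
	 * reset recovery), or unconditionally in firmware recovery mode so
	 * that the recovery path keeps being serviced.
	 */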
309 	if ((!test_bit(__I40E_DOWN, pf->state) &&
310 	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
311 	      test_bit(__I40E_RECOVERY_MODE, pf->state))
312 		queue_work(i40e_wq, &pf->service_task);
313 }
314 
315 /**
316  * i40e_tx_timeout - Respond to a Tx Hang
317  * @netdev: network interface device structure
318  * @txqueue: queue number timing out
319  *
320  * If any port has noticed a Tx timeout, it is likely that the whole
321  * device is munged, not just the one netdev port, so go for the full
322  * reset.
323  **/
324 static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
325 {
326 	struct i40e_netdev_priv *np = netdev_priv(netdev);
327 	struct i40e_vsi *vsi = np->vsi;
328 	struct i40e_pf *pf = vsi->back;
329 	struct i40e_ring *tx_ring = NULL;
330 	unsigned int i;
331 	u32 head, val;
332 
333 	pf->tx_timeout_count++;
334 
335 	/* with txqueue index, find the tx_ring struct */
336 	for (i = 0; i < vsi->num_queue_pairs; i++) {
337 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
338 			if (txqueue ==
339 			    vsi->tx_rings[i]->queue_index) {
340 				tx_ring = vsi->tx_rings[i];
341 				break;
342 			}
343 		}
344 	}
345 
346 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
347 		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
348 	else if (time_before(jiffies,
349 		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
350 		return;   /* don't do any new action before the next timeout */
351 
352 	/* don't kick off another recovery if one is already pending */
353 	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
354 		return;
355 
356 	if (tx_ring) {
357 		head = i40e_get_head(tx_ring);
358 		/* Read interrupt register */
359 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
360 			val = rd32(&pf->hw,
361 			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
362 						tx_ring->vsi->base_vector - 1));
363 		else
364 			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
365 
366 		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
367 			    vsi->seid, txqueue, tx_ring->next_to_clean,
368 			    head, tx_ring->next_to_use,
369 			    readl(tx_ring->tail), val);
370 	}
371 
372 	pf->tx_timeout_last_recovery = jiffies;
373 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
374 		    pf->tx_timeout_recovery_level, txqueue);
375 
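	/* Escalate through progressively heavier resets: level 1 requests a
	 * PF reset, level 2 a core reset and level 3 a global reset; past
	 * that we only log until the 20 second window above drops the level
	 * back to 1.
	 */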
376 	switch (pf->tx_timeout_recovery_level) {
377 	case 1:
378 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
379 		break;
380 	case 2:
381 		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
382 		break;
383 	case 3:
384 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
385 		break;
386 	default:
387 		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
388 		break;
389 	}
390 
391 	i40e_service_event_schedule(pf);
392 	pf->tx_timeout_recovery_level++;
393 }
394 
395 /**
396  * i40e_get_vsi_stats_struct - Get System Network Statistics
397  * @vsi: the VSI we care about
398  *
399  * Returns the address of the device statistics structure.
400  * The statistics are actually updated from the service task.
401  **/
402 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
403 {
404 	return &vsi->net_stats;
405 }
406 
407 /**
408  * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
409  * @ring: Tx ring to get statistics from
410  * @stats: statistics entry to be updated
411  **/
412 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
413 					    struct rtnl_link_stats64 *stats)
414 {
415 	u64 bytes, packets;
416 	unsigned int start;
417 
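	/* Loop on the u64_stats seqcount so the packet/byte pair is read as
	 * a consistent 64-bit snapshot, even on 32-bit architectures.
	 */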
418 	do {
419 		start = u64_stats_fetch_begin_irq(&ring->syncp);
420 		packets = ring->stats.packets;
421 		bytes   = ring->stats.bytes;
422 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
423 
424 	stats->tx_packets += packets;
425 	stats->tx_bytes   += bytes;
426 }
427 
428 /**
429  * i40e_get_netdev_stats_struct - Get statistics for netdev interface
430  * @netdev: network interface device structure
431  * @stats: data structure to store statistics
432  *
433  * Returns the address of the device statistics structure.
434  * The statistics are actually updated from the service task.
435  **/
436 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
437 				  struct rtnl_link_stats64 *stats)
438 {
439 	struct i40e_netdev_priv *np = netdev_priv(netdev);
440 	struct i40e_vsi *vsi = np->vsi;
441 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
442 	struct i40e_ring *ring;
443 	int i;
444 
445 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
446 		return;
447 
448 	if (!vsi->tx_rings)
449 		return;
450 
451 	rcu_read_lock();
452 	for (i = 0; i < vsi->num_queue_pairs; i++) {
453 		u64 bytes, packets;
454 		unsigned int start;
455 
456 		ring = READ_ONCE(vsi->tx_rings[i]);
457 		if (!ring)
458 			continue;
459 		i40e_get_netdev_stats_struct_tx(ring, stats);
460 
461 		if (i40e_enabled_xdp_vsi(vsi)) {
462 			ring = READ_ONCE(vsi->xdp_rings[i]);
463 			if (!ring)
464 				continue;
465 			i40e_get_netdev_stats_struct_tx(ring, stats);
466 		}
467 
468 		ring = READ_ONCE(vsi->rx_rings[i]);
469 		if (!ring)
470 			continue;
471 		do {
472 			start   = u64_stats_fetch_begin_irq(&ring->syncp);
473 			packets = ring->stats.packets;
474 			bytes   = ring->stats.bytes;
475 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
476 
477 		stats->rx_packets += packets;
478 		stats->rx_bytes   += bytes;
480 	}
481 	rcu_read_unlock();
482 
483 	/* following stats updated by i40e_watchdog_subtask() */
484 	stats->multicast	= vsi_stats->multicast;
485 	stats->tx_errors	= vsi_stats->tx_errors;
486 	stats->tx_dropped	= vsi_stats->tx_dropped;
487 	stats->rx_errors	= vsi_stats->rx_errors;
488 	stats->rx_dropped	= vsi_stats->rx_dropped;
489 	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
490 	stats->rx_length_errors	= vsi_stats->rx_length_errors;
491 }
492 
493 /**
494  * i40e_vsi_reset_stats - Resets all stats of the given vsi
495  * @vsi: the VSI to have its stats reset
496  **/
497 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
498 {
499 	struct rtnl_link_stats64 *ns;
500 	int i;
501 
502 	if (!vsi)
503 		return;
504 
505 	ns = i40e_get_vsi_stats_struct(vsi);
506 	memset(ns, 0, sizeof(*ns));
507 	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
508 	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
509 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
510 	if (vsi->rx_rings && vsi->rx_rings[0]) {
511 		for (i = 0; i < vsi->num_queue_pairs; i++) {
512 			memset(&vsi->rx_rings[i]->stats, 0,
513 			       sizeof(vsi->rx_rings[i]->stats));
514 			memset(&vsi->rx_rings[i]->rx_stats, 0,
515 			       sizeof(vsi->rx_rings[i]->rx_stats));
516 			memset(&vsi->tx_rings[i]->stats, 0,
517 			       sizeof(vsi->tx_rings[i]->stats));
518 			memset(&vsi->tx_rings[i]->tx_stats, 0,
519 			       sizeof(vsi->tx_rings[i]->tx_stats));
520 		}
521 	}
522 	vsi->stat_offsets_loaded = false;
523 }
524 
525 /**
526  * i40e_pf_reset_stats - Reset all of the stats for the given PF
527  * @pf: the PF to be reset
528  **/
529 void i40e_pf_reset_stats(struct i40e_pf *pf)
530 {
531 	int i;
532 
533 	memset(&pf->stats, 0, sizeof(pf->stats));
534 	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
535 	pf->stat_offsets_loaded = false;
536 
537 	for (i = 0; i < I40E_MAX_VEB; i++) {
538 		if (pf->veb[i]) {
539 			memset(&pf->veb[i]->stats, 0,
540 			       sizeof(pf->veb[i]->stats));
541 			memset(&pf->veb[i]->stats_offsets, 0,
542 			       sizeof(pf->veb[i]->stats_offsets));
543 			memset(&pf->veb[i]->tc_stats, 0,
544 			       sizeof(pf->veb[i]->tc_stats));
545 			memset(&pf->veb[i]->tc_stats_offsets, 0,
546 			       sizeof(pf->veb[i]->tc_stats_offsets));
547 			pf->veb[i]->stat_offsets_loaded = false;
548 		}
549 	}
550 	pf->hw_csum_rx_error = 0;
551 }
552 
553 /**
554  * i40e_stat_update48 - read and update a 48 bit stat from the chip
555  * @hw: ptr to the hardware info
556  * @hireg: the high 32 bit reg to read
557  * @loreg: the low 32 bit reg to read
558  * @offset_loaded: has the initial offset been loaded yet
559  * @offset: ptr to current offset value
560  * @stat: ptr to the stat
561  *
562  * Since the device stats are not reset at PFReset, they likely will not
563  * be zeroed when the driver starts.  We'll save the first values read
564  * and use them as offsets to be subtracted from the raw values in order
565  * to report stats that count from zero.  In the process, we also manage
566  * the potential roll-over.
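 *
 * For example, once the 48-bit counter wraps, the new reading is smaller
 * than the saved offset, so BIT_ULL(48) is added back before subtracting
 * to keep the reported value monotonic.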
567  **/
568 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
569 			       bool offset_loaded, u64 *offset, u64 *stat)
570 {
571 	u64 new_data;
572 
573 	if (hw->device_id == I40E_DEV_ID_QEMU) {
574 		new_data = rd32(hw, loreg);
575 		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
576 	} else {
577 		new_data = rd64(hw, loreg);
578 	}
579 	if (!offset_loaded)
580 		*offset = new_data;
581 	if (likely(new_data >= *offset))
582 		*stat = new_data - *offset;
583 	else
584 		*stat = (new_data + BIT_ULL(48)) - *offset;
585 	*stat &= 0xFFFFFFFFFFFFULL;
586 }
587 
588 /**
589  * i40e_stat_update32 - read and update a 32 bit stat from the chip
590  * @hw: ptr to the hardware info
591  * @reg: the hw reg to read
592  * @offset_loaded: has the initial offset been loaded yet
593  * @offset: ptr to current offset value
594  * @stat: ptr to the stat
595  **/
596 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
597 			       bool offset_loaded, u64 *offset, u64 *stat)
598 {
599 	u32 new_data;
600 
601 	new_data = rd32(hw, reg);
602 	if (!offset_loaded)
603 		*offset = new_data;
604 	if (likely(new_data >= *offset))
605 		*stat = (u32)(new_data - *offset);
606 	else
607 		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
608 }
609 
610 /**
611  * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
612  * @hw: ptr to the hardware info
613  * @reg: the hw reg to read and clear
614  * @stat: ptr to the stat
615  **/
616 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
617 {
618 	u32 new_data = rd32(hw, reg);
619 
620 	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
621 	*stat += new_data;
622 }
623 
624 /**
625  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
626  * @vsi: the VSI to be updated
627  **/
628 void i40e_update_eth_stats(struct i40e_vsi *vsi)
629 {
630 	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
631 	struct i40e_pf *pf = vsi->back;
632 	struct i40e_hw *hw = &pf->hw;
633 	struct i40e_eth_stats *oes;
634 	struct i40e_eth_stats *es;     /* device's eth stats */
635 
636 	es = &vsi->eth_stats;
637 	oes = &vsi->eth_stats_offsets;
638 
639 	/* Gather up the stats that the hw collects */
640 	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
641 			   vsi->stat_offsets_loaded,
642 			   &oes->tx_errors, &es->tx_errors);
643 	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
644 			   vsi->stat_offsets_loaded,
645 			   &oes->rx_discards, &es->rx_discards);
646 	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
647 			   vsi->stat_offsets_loaded,
648 			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
649 
650 	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
651 			   I40E_GLV_GORCL(stat_idx),
652 			   vsi->stat_offsets_loaded,
653 			   &oes->rx_bytes, &es->rx_bytes);
654 	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
655 			   I40E_GLV_UPRCL(stat_idx),
656 			   vsi->stat_offsets_loaded,
657 			   &oes->rx_unicast, &es->rx_unicast);
658 	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
659 			   I40E_GLV_MPRCL(stat_idx),
660 			   vsi->stat_offsets_loaded,
661 			   &oes->rx_multicast, &es->rx_multicast);
662 	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
663 			   I40E_GLV_BPRCL(stat_idx),
664 			   vsi->stat_offsets_loaded,
665 			   &oes->rx_broadcast, &es->rx_broadcast);
666 
667 	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
668 			   I40E_GLV_GOTCL(stat_idx),
669 			   vsi->stat_offsets_loaded,
670 			   &oes->tx_bytes, &es->tx_bytes);
671 	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
672 			   I40E_GLV_UPTCL(stat_idx),
673 			   vsi->stat_offsets_loaded,
674 			   &oes->tx_unicast, &es->tx_unicast);
675 	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
676 			   I40E_GLV_MPTCL(stat_idx),
677 			   vsi->stat_offsets_loaded,
678 			   &oes->tx_multicast, &es->tx_multicast);
679 	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
680 			   I40E_GLV_BPTCL(stat_idx),
681 			   vsi->stat_offsets_loaded,
682 			   &oes->tx_broadcast, &es->tx_broadcast);
683 	vsi->stat_offsets_loaded = true;
684 }
685 
686 /**
687  * i40e_update_veb_stats - Update Switch component statistics
688  * @veb: the VEB being updated
689  **/
690 void i40e_update_veb_stats(struct i40e_veb *veb)
691 {
692 	struct i40e_pf *pf = veb->pf;
693 	struct i40e_hw *hw = &pf->hw;
694 	struct i40e_eth_stats *oes;
695 	struct i40e_eth_stats *es;     /* device's eth stats */
696 	struct i40e_veb_tc_stats *veb_oes;
697 	struct i40e_veb_tc_stats *veb_es;
698 	int i, idx = 0;
699 
700 	idx = veb->stats_idx;
701 	es = &veb->stats;
702 	oes = &veb->stats_offsets;
703 	veb_es = &veb->tc_stats;
704 	veb_oes = &veb->tc_stats_offsets;
705 
706 	/* Gather up the stats that the hw collects */
707 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
708 			   veb->stat_offsets_loaded,
709 			   &oes->tx_discards, &es->tx_discards);
710 	if (hw->revision_id > 0)
711 		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
712 				   veb->stat_offsets_loaded,
713 				   &oes->rx_unknown_protocol,
714 				   &es->rx_unknown_protocol);
715 	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
716 			   veb->stat_offsets_loaded,
717 			   &oes->rx_bytes, &es->rx_bytes);
718 	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
719 			   veb->stat_offsets_loaded,
720 			   &oes->rx_unicast, &es->rx_unicast);
721 	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
722 			   veb->stat_offsets_loaded,
723 			   &oes->rx_multicast, &es->rx_multicast);
724 	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
725 			   veb->stat_offsets_loaded,
726 			   &oes->rx_broadcast, &es->rx_broadcast);
727 
728 	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
729 			   veb->stat_offsets_loaded,
730 			   &oes->tx_bytes, &es->tx_bytes);
731 	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
732 			   veb->stat_offsets_loaded,
733 			   &oes->tx_unicast, &es->tx_unicast);
734 	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
735 			   veb->stat_offsets_loaded,
736 			   &oes->tx_multicast, &es->tx_multicast);
737 	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
738 			   veb->stat_offsets_loaded,
739 			   &oes->tx_broadcast, &es->tx_broadcast);
740 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
741 		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
742 				   I40E_GLVEBTC_RPCL(i, idx),
743 				   veb->stat_offsets_loaded,
744 				   &veb_oes->tc_rx_packets[i],
745 				   &veb_es->tc_rx_packets[i]);
746 		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
747 				   I40E_GLVEBTC_RBCL(i, idx),
748 				   veb->stat_offsets_loaded,
749 				   &veb_oes->tc_rx_bytes[i],
750 				   &veb_es->tc_rx_bytes[i]);
751 		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
752 				   I40E_GLVEBTC_TPCL(i, idx),
753 				   veb->stat_offsets_loaded,
754 				   &veb_oes->tc_tx_packets[i],
755 				   &veb_es->tc_tx_packets[i]);
756 		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
757 				   I40E_GLVEBTC_TBCL(i, idx),
758 				   veb->stat_offsets_loaded,
759 				   &veb_oes->tc_tx_bytes[i],
760 				   &veb_es->tc_tx_bytes[i]);
761 	}
762 	veb->stat_offsets_loaded = true;
763 }
764 
765 /**
766  * i40e_update_vsi_stats - Update the vsi statistics counters.
767  * @vsi: the VSI to be updated
768  *
769  * There are a few instances where we store the same stat in a
770  * couple of different structs.  This is partly because we have
771  * the netdev stats that need to be filled out, which is slightly
772  * different from the "eth_stats" defined by the chip and used in
773  * VF communications.  We sort it out here.
774  **/
775 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
776 {
777 	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
778 	struct i40e_pf *pf = vsi->back;
779 	struct rtnl_link_stats64 *ons;
780 	struct rtnl_link_stats64 *ns;   /* netdev stats */
781 	struct i40e_eth_stats *oes;
782 	struct i40e_eth_stats *es;     /* device's eth stats */
783 	u64 tx_restart, tx_busy;
784 	struct i40e_ring *p;
785 	u64 bytes, packets;
786 	unsigned int start;
787 	u64 tx_linearize;
788 	u64 tx_force_wb;
789 	u64 tx_stopped;
790 	u64 rx_p, rx_b;
791 	u64 tx_p, tx_b;
792 	u16 q;
793 
794 	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
795 	    test_bit(__I40E_CONFIG_BUSY, pf->state))
796 		return;
797 
798 	ns = i40e_get_vsi_stats_struct(vsi);
799 	ons = &vsi->net_stats_offsets;
800 	es = &vsi->eth_stats;
801 	oes = &vsi->eth_stats_offsets;
802 
803 	/* Gather up the netdev and vsi stats that the driver collects
804 	 * on the fly during packet processing
805 	 */
806 	rx_b = rx_p = 0;
807 	tx_b = tx_p = 0;
808 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
809 	tx_stopped = 0;
810 	rx_page = 0;
811 	rx_buf = 0;
812 	rx_reuse = 0;
813 	rx_alloc = 0;
814 	rx_waive = 0;
815 	rx_busy = 0;
816 	rcu_read_lock();
817 	for (q = 0; q < vsi->num_queue_pairs; q++) {
818 		/* locate Tx ring */
819 		p = READ_ONCE(vsi->tx_rings[q]);
820 		if (!p)
821 			continue;
822 
823 		do {
824 			start = u64_stats_fetch_begin_irq(&p->syncp);
825 			packets = p->stats.packets;
826 			bytes = p->stats.bytes;
827 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
828 		tx_b += bytes;
829 		tx_p += packets;
830 		tx_restart += p->tx_stats.restart_queue;
831 		tx_busy += p->tx_stats.tx_busy;
832 		tx_linearize += p->tx_stats.tx_linearize;
833 		tx_force_wb += p->tx_stats.tx_force_wb;
834 		tx_stopped += p->tx_stats.tx_stopped;
835 
836 		/* locate Rx ring */
837 		p = READ_ONCE(vsi->rx_rings[q]);
838 		if (!p)
839 			continue;
840 
841 		do {
842 			start = u64_stats_fetch_begin_irq(&p->syncp);
843 			packets = p->stats.packets;
844 			bytes = p->stats.bytes;
845 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
846 		rx_b += bytes;
847 		rx_p += packets;
848 		rx_buf += p->rx_stats.alloc_buff_failed;
849 		rx_page += p->rx_stats.alloc_page_failed;
850 		rx_reuse += p->rx_stats.page_reuse_count;
851 		rx_alloc += p->rx_stats.page_alloc_count;
852 		rx_waive += p->rx_stats.page_waive_count;
853 		rx_busy += p->rx_stats.page_busy_count;
854 
855 		if (i40e_enabled_xdp_vsi(vsi)) {
856 			/* locate XDP ring */
857 			p = READ_ONCE(vsi->xdp_rings[q]);
858 			if (!p)
859 				continue;
860 
861 			do {
862 				start = u64_stats_fetch_begin_irq(&p->syncp);
863 				packets = p->stats.packets;
864 				bytes = p->stats.bytes;
865 			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
866 			tx_b += bytes;
867 			tx_p += packets;
868 			tx_restart += p->tx_stats.restart_queue;
869 			tx_busy += p->tx_stats.tx_busy;
870 			tx_linearize += p->tx_stats.tx_linearize;
871 			tx_force_wb += p->tx_stats.tx_force_wb;
872 		}
873 	}
874 	rcu_read_unlock();
875 	vsi->tx_restart = tx_restart;
876 	vsi->tx_busy = tx_busy;
877 	vsi->tx_linearize = tx_linearize;
878 	vsi->tx_force_wb = tx_force_wb;
879 	vsi->tx_stopped = tx_stopped;
880 	vsi->rx_page_failed = rx_page;
881 	vsi->rx_buf_failed = rx_buf;
882 	vsi->rx_page_reuse = rx_reuse;
883 	vsi->rx_page_alloc = rx_alloc;
884 	vsi->rx_page_waive = rx_waive;
885 	vsi->rx_page_busy = rx_busy;
886 
887 	ns->rx_packets = rx_p;
888 	ns->rx_bytes = rx_b;
889 	ns->tx_packets = tx_p;
890 	ns->tx_bytes = tx_b;
891 
892 	/* update netdev stats from eth stats */
893 	i40e_update_eth_stats(vsi);
894 	ons->tx_errors = oes->tx_errors;
895 	ns->tx_errors = es->tx_errors;
896 	ons->multicast = oes->rx_multicast;
897 	ns->multicast = es->rx_multicast;
898 	ons->rx_dropped = oes->rx_discards;
899 	ns->rx_dropped = es->rx_discards;
900 	ons->tx_dropped = oes->tx_discards;
901 	ns->tx_dropped = es->tx_discards;
902 
903 	/* pull in a couple PF stats if this is the main vsi */
904 	if (vsi == pf->vsi[pf->lan_vsi]) {
905 		ns->rx_crc_errors = pf->stats.crc_errors;
906 		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
907 		ns->rx_length_errors = pf->stats.rx_length_errors;
908 	}
909 }
910 
911 /**
912  * i40e_update_pf_stats - Update the PF statistics counters.
913  * @pf: the PF to be updated
914  **/
915 static void i40e_update_pf_stats(struct i40e_pf *pf)
916 {
917 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
918 	struct i40e_hw_port_stats *nsd = &pf->stats;
919 	struct i40e_hw *hw = &pf->hw;
920 	u32 val;
921 	int i;
922 
923 	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
924 			   I40E_GLPRT_GORCL(hw->port),
925 			   pf->stat_offsets_loaded,
926 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
927 	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
928 			   I40E_GLPRT_GOTCL(hw->port),
929 			   pf->stat_offsets_loaded,
930 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
931 	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
932 			   pf->stat_offsets_loaded,
933 			   &osd->eth.rx_discards,
934 			   &nsd->eth.rx_discards);
935 	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
936 			   I40E_GLPRT_UPRCL(hw->port),
937 			   pf->stat_offsets_loaded,
938 			   &osd->eth.rx_unicast,
939 			   &nsd->eth.rx_unicast);
940 	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
941 			   I40E_GLPRT_MPRCL(hw->port),
942 			   pf->stat_offsets_loaded,
943 			   &osd->eth.rx_multicast,
944 			   &nsd->eth.rx_multicast);
945 	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
946 			   I40E_GLPRT_BPRCL(hw->port),
947 			   pf->stat_offsets_loaded,
948 			   &osd->eth.rx_broadcast,
949 			   &nsd->eth.rx_broadcast);
950 	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
951 			   I40E_GLPRT_UPTCL(hw->port),
952 			   pf->stat_offsets_loaded,
953 			   &osd->eth.tx_unicast,
954 			   &nsd->eth.tx_unicast);
955 	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
956 			   I40E_GLPRT_MPTCL(hw->port),
957 			   pf->stat_offsets_loaded,
958 			   &osd->eth.tx_multicast,
959 			   &nsd->eth.tx_multicast);
960 	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
961 			   I40E_GLPRT_BPTCL(hw->port),
962 			   pf->stat_offsets_loaded,
963 			   &osd->eth.tx_broadcast,
964 			   &nsd->eth.tx_broadcast);
965 
966 	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
967 			   pf->stat_offsets_loaded,
968 			   &osd->tx_dropped_link_down,
969 			   &nsd->tx_dropped_link_down);
970 
971 	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
972 			   pf->stat_offsets_loaded,
973 			   &osd->crc_errors, &nsd->crc_errors);
974 
975 	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
976 			   pf->stat_offsets_loaded,
977 			   &osd->illegal_bytes, &nsd->illegal_bytes);
978 
979 	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
980 			   pf->stat_offsets_loaded,
981 			   &osd->mac_local_faults,
982 			   &nsd->mac_local_faults);
983 	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
984 			   pf->stat_offsets_loaded,
985 			   &osd->mac_remote_faults,
986 			   &nsd->mac_remote_faults);
987 
988 	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
989 			   pf->stat_offsets_loaded,
990 			   &osd->rx_length_errors,
991 			   &nsd->rx_length_errors);
992 
993 	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
994 			   pf->stat_offsets_loaded,
995 			   &osd->link_xon_rx, &nsd->link_xon_rx);
996 	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
997 			   pf->stat_offsets_loaded,
998 			   &osd->link_xon_tx, &nsd->link_xon_tx);
999 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1000 			   pf->stat_offsets_loaded,
1001 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
1002 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1003 			   pf->stat_offsets_loaded,
1004 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
1005 
1006 	for (i = 0; i < 8; i++) {
1007 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1008 				   pf->stat_offsets_loaded,
1009 				   &osd->priority_xoff_rx[i],
1010 				   &nsd->priority_xoff_rx[i]);
1011 		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1012 				   pf->stat_offsets_loaded,
1013 				   &osd->priority_xon_rx[i],
1014 				   &nsd->priority_xon_rx[i]);
1015 		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1016 				   pf->stat_offsets_loaded,
1017 				   &osd->priority_xon_tx[i],
1018 				   &nsd->priority_xon_tx[i]);
1019 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1020 				   pf->stat_offsets_loaded,
1021 				   &osd->priority_xoff_tx[i],
1022 				   &nsd->priority_xoff_tx[i]);
1023 		i40e_stat_update32(hw,
1024 				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1025 				   pf->stat_offsets_loaded,
1026 				   &osd->priority_xon_2_xoff[i],
1027 				   &nsd->priority_xon_2_xoff[i]);
1028 	}
1029 
1030 	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1031 			   I40E_GLPRT_PRC64L(hw->port),
1032 			   pf->stat_offsets_loaded,
1033 			   &osd->rx_size_64, &nsd->rx_size_64);
1034 	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1035 			   I40E_GLPRT_PRC127L(hw->port),
1036 			   pf->stat_offsets_loaded,
1037 			   &osd->rx_size_127, &nsd->rx_size_127);
1038 	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1039 			   I40E_GLPRT_PRC255L(hw->port),
1040 			   pf->stat_offsets_loaded,
1041 			   &osd->rx_size_255, &nsd->rx_size_255);
1042 	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1043 			   I40E_GLPRT_PRC511L(hw->port),
1044 			   pf->stat_offsets_loaded,
1045 			   &osd->rx_size_511, &nsd->rx_size_511);
1046 	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1047 			   I40E_GLPRT_PRC1023L(hw->port),
1048 			   pf->stat_offsets_loaded,
1049 			   &osd->rx_size_1023, &nsd->rx_size_1023);
1050 	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1051 			   I40E_GLPRT_PRC1522L(hw->port),
1052 			   pf->stat_offsets_loaded,
1053 			   &osd->rx_size_1522, &nsd->rx_size_1522);
1054 	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1055 			   I40E_GLPRT_PRC9522L(hw->port),
1056 			   pf->stat_offsets_loaded,
1057 			   &osd->rx_size_big, &nsd->rx_size_big);
1058 
1059 	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1060 			   I40E_GLPRT_PTC64L(hw->port),
1061 			   pf->stat_offsets_loaded,
1062 			   &osd->tx_size_64, &nsd->tx_size_64);
1063 	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1064 			   I40E_GLPRT_PTC127L(hw->port),
1065 			   pf->stat_offsets_loaded,
1066 			   &osd->tx_size_127, &nsd->tx_size_127);
1067 	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1068 			   I40E_GLPRT_PTC255L(hw->port),
1069 			   pf->stat_offsets_loaded,
1070 			   &osd->tx_size_255, &nsd->tx_size_255);
1071 	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1072 			   I40E_GLPRT_PTC511L(hw->port),
1073 			   pf->stat_offsets_loaded,
1074 			   &osd->tx_size_511, &nsd->tx_size_511);
1075 	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1076 			   I40E_GLPRT_PTC1023L(hw->port),
1077 			   pf->stat_offsets_loaded,
1078 			   &osd->tx_size_1023, &nsd->tx_size_1023);
1079 	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1080 			   I40E_GLPRT_PTC1522L(hw->port),
1081 			   pf->stat_offsets_loaded,
1082 			   &osd->tx_size_1522, &nsd->tx_size_1522);
1083 	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1084 			   I40E_GLPRT_PTC9522L(hw->port),
1085 			   pf->stat_offsets_loaded,
1086 			   &osd->tx_size_big, &nsd->tx_size_big);
1087 
1088 	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1089 			   pf->stat_offsets_loaded,
1090 			   &osd->rx_undersize, &nsd->rx_undersize);
1091 	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1092 			   pf->stat_offsets_loaded,
1093 			   &osd->rx_fragments, &nsd->rx_fragments);
1094 	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1095 			   pf->stat_offsets_loaded,
1096 			   &osd->rx_oversize, &nsd->rx_oversize);
1097 	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1098 			   pf->stat_offsets_loaded,
1099 			   &osd->rx_jabber, &nsd->rx_jabber);
1100 
1101 	/* FDIR stats */
1102 	i40e_stat_update_and_clear32(hw,
1103 			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1104 			&nsd->fd_atr_match);
1105 	i40e_stat_update_and_clear32(hw,
1106 			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1107 			&nsd->fd_sb_match);
1108 	i40e_stat_update_and_clear32(hw,
1109 			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1110 			&nsd->fd_atr_tunnel_match);
1111 
1112 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
1113 	nsd->tx_lpi_status =
1114 		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1115 			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1116 	nsd->rx_lpi_status =
1117 		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1118 			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1119 	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1120 			   pf->stat_offsets_loaded,
1121 			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
1122 	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1123 			   pf->stat_offsets_loaded,
1124 			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
1125 
1126 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1127 	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1128 		nsd->fd_sb_status = true;
1129 	else
1130 		nsd->fd_sb_status = false;
1131 
1132 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1133 	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1134 		nsd->fd_atr_status = true;
1135 	else
1136 		nsd->fd_atr_status = false;
1137 
1138 	pf->stat_offsets_loaded = true;
1139 }
1140 
1141 /**
1142  * i40e_update_stats - Update the various statistics counters.
1143  * @vsi: the VSI to be updated
1144  *
1145  * Update the various stats for this VSI and its related entities.
1146  **/
1147 void i40e_update_stats(struct i40e_vsi *vsi)
1148 {
1149 	struct i40e_pf *pf = vsi->back;
1150 
1151 	if (vsi == pf->vsi[pf->lan_vsi])
1152 		i40e_update_pf_stats(pf);
1153 
1154 	i40e_update_vsi_stats(vsi);
1155 }
1156 
1157 /**
1158  * i40e_count_filters - counts VSI mac filters
1159  * @vsi: the VSI to be searched
1160  *
1161  * Returns count of mac filters
1162  **/
1163 int i40e_count_filters(struct i40e_vsi *vsi)
1164 {
1165 	struct i40e_mac_filter *f;
1166 	struct hlist_node *h;
1167 	int bkt;
1168 	int cnt = 0;
1169 
1170 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
1171 		++cnt;
1172 
1173 	return cnt;
1174 }
1175 
1176 /**
1177  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1178  * @vsi: the VSI to be searched
1179  * @macaddr: the MAC address
1180  * @vlan: the vlan
1181  *
1182  * Returns ptr to the filter object or NULL
1183  **/
1184 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1185 						const u8 *macaddr, s16 vlan)
1186 {
1187 	struct i40e_mac_filter *f;
1188 	u64 key;
1189 
1190 	if (!vsi || !macaddr)
1191 		return NULL;
1192 
1193 	key = i40e_addr_to_hkey(macaddr);
1194 	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1195 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1196 		    (vlan == f->vlan))
1197 			return f;
1198 	}
1199 	return NULL;
1200 }
1201 
1202 /**
1203  * i40e_find_mac - Find a mac addr in the macvlan filters list
1204  * @vsi: the VSI to be searched
1205  * @macaddr: the MAC address we are searching for
1206  *
1207  * Returns the first filter with the provided MAC address or NULL if
1208  * MAC address was not found
1209  **/
1210 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1211 {
1212 	struct i40e_mac_filter *f;
1213 	u64 key;
1214 
1215 	if (!vsi || !macaddr)
1216 		return NULL;
1217 
1218 	key = i40e_addr_to_hkey(macaddr);
1219 	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1220 		if ((ether_addr_equal(macaddr, f->macaddr)))
1221 			return f;
1222 	}
1223 	return NULL;
1224 }
1225 
1226 /**
1227  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1228  * @vsi: the VSI to be searched
1229  *
1230  * Returns true if VSI is in vlan mode or false otherwise
1231  **/
1232 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1233 {
1234 	/* If we have a PVID, always operate in VLAN mode */
1235 	if (vsi->info.pvid)
1236 		return true;
1237 
1238 	/* We need to operate in VLAN mode whenever we have any filters with
1239 	 * a VLAN other than I40E_VLAN_ANY. We could check the table each
1240 	 * time, incurring search cost repeatedly. However, we can notice two
1241 	 * things:
1242 	 *
1243 	 * 1) the only place where we can gain a VLAN filter is in
1244 	 *    i40e_add_filter.
1245 	 *
1246 	 * 2) the only place where filters are actually removed is in
1247 	 *    i40e_sync_filters_subtask.
1248 	 *
1249 	 * Thus, we can simply use a boolean value, has_vlan_filters which we
1250 	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1251 	 * we have to perform the full search after deleting filters in
1252 	 * i40e_sync_filters_subtask, but we already have to search
1253 	 * filters here and can perform the check at the same time. This
1254 	 * results in avoiding embedding a loop for VLAN mode inside another
1255 	 * loop over all the filters, and should maintain correctness as noted
1256 	 * above.
1257 	 */
1258 	return vsi->has_vlan_filter;
1259 }
1260 
1261 /**
1262  * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1263  * @vsi: the VSI to configure
1264  * @tmp_add_list: list of filters ready to be added
1265  * @tmp_del_list: list of filters ready to be deleted
1266  * @vlan_filters: the number of active VLAN filters
1267  *
1268  * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1269  * behave as expected. If we have any active VLAN filters remaining or about
1270  * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1271  * so that they only match against untagged traffic. If we no longer have any
1272  * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1273  * so that they match against both tagged and untagged traffic. In this way,
1274  * we ensure that we correctly receive the desired traffic. This ensures that
1275  * when we have an active VLAN we will receive only untagged traffic and
1276  * traffic matching active VLANs. If we have no active VLANs then we will
1277  * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1278  *
1279  * Finally, in a similar fashion, this function also corrects filters when
1280  * there is an active PVID assigned to this VSI.
1281  *
1282  * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1283  *
1284  * This function is only expected to be called from within
1285  * i40e_sync_vsi_filters.
1286  *
1287  * NOTE: This function expects to be called while under the
1288  * mac_filter_hash_lock
1289  */
1290 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1291 					 struct hlist_head *tmp_add_list,
1292 					 struct hlist_head *tmp_del_list,
1293 					 int vlan_filters)
1294 {
1295 	s16 pvid = le16_to_cpu(vsi->info.pvid);
1296 	struct i40e_mac_filter *f, *add_head;
1297 	struct i40e_new_mac_filter *new;
1298 	struct hlist_node *h;
1299 	int bkt, new_vlan;
1300 
1301 	/* To determine if a particular filter needs to be replaced we
1302 	 * have the three following conditions:
1303 	 *
1304 	 * a) if we have a PVID assigned, then all filters which are
1305 	 *    not marked as VLAN=PVID must be replaced with filters that
1306 	 *    are.
1307 	 * b) otherwise, if we have any active VLANS, all filters
1308 	 *    which are marked as VLAN=-1 must be replaced with
1309 	 *    filters marked as VLAN=0
1310 	 * c) finally, if we do not have any active VLANS, all filters
1311 	 *    which are marked as VLAN=0 must be replaced with filters
1312 	 *    marked as VLAN=-1
1313 	 */
1314 
1315 	/* Update the filters about to be added in place */
1316 	hlist_for_each_entry(new, tmp_add_list, hlist) {
1317 		if (pvid && new->f->vlan != pvid)
1318 			new->f->vlan = pvid;
1319 		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1320 			new->f->vlan = 0;
1321 		else if (!vlan_filters && new->f->vlan == 0)
1322 			new->f->vlan = I40E_VLAN_ANY;
1323 	}
1324 
1325 	/* Update the remaining active filters */
1326 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1327 		/* Combine the checks for whether a filter needs to be changed
1328 		 * and then determine the new VLAN inside the if block, in
1329 		 * order to avoid duplicating code for adding the new filter
1330 		 * then deleting the old filter.
1331 		 */
1332 		if ((pvid && f->vlan != pvid) ||
1333 		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1334 		    (!vlan_filters && f->vlan == 0)) {
1335 			/* Determine the new vlan we will be adding */
1336 			if (pvid)
1337 				new_vlan = pvid;
1338 			else if (vlan_filters)
1339 				new_vlan = 0;
1340 			else
1341 				new_vlan = I40E_VLAN_ANY;
1342 
1343 			/* Create the new filter */
1344 			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1345 			if (!add_head)
1346 				return -ENOMEM;
1347 
1348 			/* Create a temporary i40e_new_mac_filter */
1349 			new = kzalloc(sizeof(*new), GFP_ATOMIC);
1350 			if (!new)
1351 				return -ENOMEM;
1352 
1353 			new->f = add_head;
1354 			new->state = add_head->state;
1355 
1356 			/* Add the new filter to the tmp list */
1357 			hlist_add_head(&new->hlist, tmp_add_list);
1358 
1359 			/* Put the original filter into the delete list */
1360 			f->state = I40E_FILTER_REMOVE;
1361 			hash_del(&f->hlist);
1362 			hlist_add_head(&f->hlist, tmp_del_list);
1363 		}
1364 	}
1365 
1366 	vsi->has_vlan_filter = !!vlan_filters;
1367 
1368 	return 0;
1369 }
1370 
1371 /**
1372  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1373  * @vsi: the PF Main VSI - inappropriate for any other VSI
1374  * @macaddr: the MAC address
1375  *
1376  * Remove whatever filter the firmware set up so the driver can manage
1377  * its own filtering intelligently.
1378  **/
1379 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1380 {
1381 	struct i40e_aqc_remove_macvlan_element_data element;
1382 	struct i40e_pf *pf = vsi->back;
1383 
1384 	/* Only appropriate for the PF main VSI */
1385 	if (vsi->type != I40E_VSI_MAIN)
1386 		return;
1387 
1388 	memset(&element, 0, sizeof(element));
1389 	ether_addr_copy(element.mac_addr, macaddr);
1390 	element.vlan_tag = 0;
1391 	/* Ignore error returns, some firmware does it this way... */
1392 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1393 	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1394 
1395 	memset(&element, 0, sizeof(element));
1396 	ether_addr_copy(element.mac_addr, macaddr);
1397 	element.vlan_tag = 0;
1398 	/* ...and some firmware does it this way. */
1399 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1400 			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1401 	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1402 }
1403 
1404 /**
1405  * i40e_add_filter - Add a mac/vlan filter to the VSI
1406  * @vsi: the VSI to be searched
1407  * @macaddr: the MAC address
1408  * @vlan: the vlan
1409  *
1410  * Returns ptr to the filter object or NULL when no memory available.
1411  *
1412  * NOTE: This function is expected to be called with mac_filter_hash_lock
1413  * being held.
1414  **/
1415 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1416 					const u8 *macaddr, s16 vlan)
1417 {
1418 	struct i40e_mac_filter *f;
1419 	u64 key;
1420 
1421 	if (!vsi || !macaddr)
1422 		return NULL;
1423 
1424 	f = i40e_find_filter(vsi, macaddr, vlan);
1425 	if (!f) {
1426 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
1427 		if (!f)
1428 			return NULL;
1429 
1430 		/* Update the boolean indicating if we need to function in
1431 		 * VLAN mode.
1432 		 */
1433 		if (vlan >= 0)
1434 			vsi->has_vlan_filter = true;
1435 
1436 		ether_addr_copy(f->macaddr, macaddr);
1437 		f->vlan = vlan;
1438 		f->state = I40E_FILTER_NEW;
1439 		INIT_HLIST_NODE(&f->hlist);
1440 
1441 		key = i40e_addr_to_hkey(macaddr);
1442 		hash_add(vsi->mac_filter_hash, &f->hlist, key);
1443 
1444 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1445 		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1446 	}
1447 
1448 	/* If we're asked to add a filter that has been marked for removal, it
1449 	 * is safe to simply restore it to active state. __i40e_del_filter
1450 	 * will have simply deleted any filters which were previously marked
1451 	 * NEW or FAILED, so if it is currently marked REMOVE it must have
1452 	 * previously been ACTIVE. Since we haven't yet run the sync filters
1453 	 * task, just restore this filter to the ACTIVE state so that the
1454 	 * sync task leaves it in place
1455 	 */
1456 	if (f->state == I40E_FILTER_REMOVE)
1457 		f->state = I40E_FILTER_ACTIVE;
1458 
1459 	return f;
1460 }
1461 
1462 /**
1463  * __i40e_del_filter - Remove a specific filter from the VSI
1464  * @vsi: VSI to remove from
1465  * @f: the filter to remove from the list
1466  *
1467  * This function should be called instead of i40e_del_filter only if you know
1468  * the exact filter you will remove already, such as via i40e_find_filter or
1469  * i40e_find_mac.
1470  *
1471  * NOTE: This function is expected to be called with mac_filter_hash_lock
1472  * being held.
1473  * ANOTHER NOTE: This function MUST be called from within the context of
1474  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1475  * instead of list_for_each_entry().
1476  **/
1477 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1478 {
1479 	if (!f)
1480 		return;
1481 
1482 	/* If the filter was never added to firmware then we can just delete it
1483 	 * directly and we don't want to set the status to remove or else an
1484 	 * admin queue command will unnecessarily fire.
1485 	 */
1486 	if ((f->state == I40E_FILTER_FAILED) ||
1487 	    (f->state == I40E_FILTER_NEW)) {
1488 		hash_del(&f->hlist);
1489 		kfree(f);
1490 	} else {
1491 		f->state = I40E_FILTER_REMOVE;
1492 	}
1493 
1494 	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1495 	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1496 }
1497 
1498 /**
1499  * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1500  * @vsi: the VSI to be searched
1501  * @macaddr: the MAC address
1502  * @vlan: the VLAN
1503  *
1504  * NOTE: This function is expected to be called with mac_filter_hash_lock
1505  * being held.
1506  * ANOTHER NOTE: This function MUST be called from within the context of
1507  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1508  * instead of list_for_each_entry().
1509  **/
1510 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1511 {
1512 	struct i40e_mac_filter *f;
1513 
1514 	if (!vsi || !macaddr)
1515 		return;
1516 
1517 	f = i40e_find_filter(vsi, macaddr, vlan);
1518 	__i40e_del_filter(vsi, f);
1519 }
1520 
1521 /**
1522  * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1523  * @vsi: the VSI to be searched
1524  * @macaddr: the mac address to be filtered
1525  *
1526  * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1527  * go through all the macvlan filters and add a macvlan filter for each
1528  * unique vlan that already exists. If a PVID has been assigned, instead only
1529  * add the macaddr to that VLAN.
1530  *
1531  * Returns last filter added on success, else NULL
1532  **/
1533 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1534 					    const u8 *macaddr)
1535 {
1536 	struct i40e_mac_filter *f, *add = NULL;
1537 	struct hlist_node *h;
1538 	int bkt;
1539 
1540 	if (vsi->info.pvid)
1541 		return i40e_add_filter(vsi, macaddr,
1542 				       le16_to_cpu(vsi->info.pvid));
1543 
1544 	if (!i40e_is_vsi_in_vlan(vsi))
1545 		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1546 
1547 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1548 		if (f->state == I40E_FILTER_REMOVE)
1549 			continue;
1550 		add = i40e_add_filter(vsi, macaddr, f->vlan);
1551 		if (!add)
1552 			return NULL;
1553 	}
1554 
1555 	return add;
1556 }
1557 
1558 /**
1559  * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1560  * @vsi: the VSI to be searched
1561  * @macaddr: the mac address to be removed
1562  *
1563  * Removes a given MAC address from a VSI regardless of what VLAN it has been
1564  * associated with.
1565  *
1566  * Returns 0 for success, or error
1567  **/
1568 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1569 {
1570 	struct i40e_mac_filter *f;
1571 	struct hlist_node *h;
1572 	bool found = false;
1573 	int bkt;
1574 
1575 	lockdep_assert_held(&vsi->mac_filter_hash_lock);
1576 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1577 		if (ether_addr_equal(macaddr, f->macaddr)) {
1578 			__i40e_del_filter(vsi, f);
1579 			found = true;
1580 		}
1581 	}
1582 
1583 	if (found)
1584 		return 0;
1585 	else
1586 		return -ENOENT;
1587 }
1588 
1589 /**
1590  * i40e_set_mac - NDO callback to set mac address
1591  * @netdev: network interface device structure
1592  * @p: pointer to an address structure
1593  *
1594  * Returns 0 on success, negative on failure
1595  **/
1596 static int i40e_set_mac(struct net_device *netdev, void *p)
1597 {
1598 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1599 	struct i40e_vsi *vsi = np->vsi;
1600 	struct i40e_pf *pf = vsi->back;
1601 	struct i40e_hw *hw = &pf->hw;
1602 	struct sockaddr *addr = p;
1603 
1604 	if (!is_valid_ether_addr(addr->sa_data))
1605 		return -EADDRNOTAVAIL;
1606 
1607 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1608 		netdev_info(netdev, "already using mac address %pM\n",
1609 			    addr->sa_data);
1610 		return 0;
1611 	}
1612 
1613 	if (test_bit(__I40E_DOWN, pf->state) ||
1614 	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1615 		return -EADDRNOTAVAIL;
1616 
1617 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1618 		netdev_info(netdev, "returning to hw mac address %pM\n",
1619 			    hw->mac.addr);
1620 	else
1621 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1622 
1623 	/* Copy the address first, so that we avoid a possible race with
1624 	 * .set_rx_mode().
1625 	 * - Remove old address from MAC filter
1626 	 * - Copy new address
1627 	 * - Add new address to MAC filter
1628 	 */
1629 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1630 	i40e_del_mac_filter(vsi, netdev->dev_addr);
1631 	eth_hw_addr_set(netdev, addr->sa_data);
1632 	i40e_add_mac_filter(vsi, netdev->dev_addr);
1633 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1634 
1635 	if (vsi->type == I40E_VSI_MAIN) {
1636 		i40e_status ret;
1637 
1638 		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1639 						addr->sa_data, NULL);
1640 		if (ret)
1641 			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1642 				    i40e_stat_str(hw, ret),
1643 				    i40e_aq_str(hw, hw->aq.asq_last_status));
1644 	}
1645 
1646 	/* schedule our worker thread which will take care of
1647 	 * applying the new filter changes
1648 	 */
1649 	i40e_service_event_schedule(pf);
1650 	return 0;
1651 }
1652 
1653 /**
1654  * i40e_config_rss_aq - Prepare for RSS using AQ commands
1655  * @vsi: vsi structure
1656  * @seed: RSS hash seed
1657  * @lut: pointer to lookup table of lut_size
1658  * @lut_size: size of the lookup table
1659  **/
1660 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1661 			      u8 *lut, u16 lut_size)
1662 {
1663 	struct i40e_pf *pf = vsi->back;
1664 	struct i40e_hw *hw = &pf->hw;
1665 	int ret = 0;
1666 
1667 	if (seed) {
1668 		struct i40e_aqc_get_set_rss_key_data *seed_dw =
1669 			(struct i40e_aqc_get_set_rss_key_data *)seed;
1670 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1671 		if (ret) {
1672 			dev_info(&pf->pdev->dev,
1673 				 "Cannot set RSS key, err %s aq_err %s\n",
1674 				 i40e_stat_str(hw, ret),
1675 				 i40e_aq_str(hw, hw->aq.asq_last_status));
1676 			return ret;
1677 		}
1678 	}
1679 	if (lut) {
1680 		bool pf_lut = vsi->type == I40E_VSI_MAIN;
1681 
1682 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1683 		if (ret) {
1684 			dev_info(&pf->pdev->dev,
1685 				 "Cannot set RSS lut, err %s aq_err %s\n",
1686 				 i40e_stat_str(hw, ret),
1687 				 i40e_aq_str(hw, hw->aq.asq_last_status));
1688 			return ret;
1689 		}
1690 	}
1691 	return ret;
1692 }
1693 
1694 /**
1695  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1696  * @vsi: VSI structure
1697  **/
1698 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1699 {
1700 	struct i40e_pf *pf = vsi->back;
1701 	u8 seed[I40E_HKEY_ARRAY_SIZE];
1702 	u8 *lut;
1703 	int ret;
1704 
1705 	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1706 		return 0;
1707 	if (!vsi->rss_size)
1708 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
1709 				      vsi->num_queue_pairs);
1710 	if (!vsi->rss_size)
1711 		return -EINVAL;
1712 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1713 	if (!lut)
1714 		return -ENOMEM;
1715 
1716 	/* Use the user-configured hash key and lookup table if present,
1717 	 * otherwise use defaults
1718 	 */
1719 	if (vsi->rss_lut_user)
1720 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1721 	else
1722 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1723 	if (vsi->rss_hkey_user)
1724 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1725 	else
1726 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1727 	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1728 	kfree(lut);
1729 	return ret;
1730 }
1731 
1732 /**
1733  * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1734  * @vsi: the VSI being configured
1735  * @ctxt: VSI context structure
1736  * @enabled_tc: bitmap of the traffic classes to enable
1737  *
1738  * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1739  **/
1740 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1741 					   struct i40e_vsi_context *ctxt,
1742 					   u8 enabled_tc)
1743 {
1744 	u16 qcount = 0, max_qcount, qmap, sections = 0;
1745 	int i, override_q, pow, num_qps, ret;
1746 	u8 netdev_tc = 0, offset = 0;
1747 
1748 	if (vsi->type != I40E_VSI_MAIN)
1749 		return -EINVAL;
1750 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1751 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1752 	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1753 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1754 	num_qps = vsi->mqprio_qopt.qopt.count[0];
1755 
1756 	/* find the next higher power-of-2 of num queue pairs */
1757 	pow = ilog2(num_qps);
1758 	if (!is_power_of_2(num_qps))
1759 		pow++;
1760 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1761 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
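	/* Worked example (numbers purely illustrative, not driver defaults):
	 * with num_qps = 6, ilog2(6) = 2 and 6 is not a power of two, so pow
	 * becomes 3 and the qmap above describes TC0 as 2^3 = 8 queue pairs
	 * starting at offset 0.
	 */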
1762 
1763 	/* Setup queue offset/count for all TCs for given VSI */
1764 	max_qcount = vsi->mqprio_qopt.qopt.count[0];
1765 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1766 		/* See if the given TC is enabled for the given VSI */
1767 		if (vsi->tc_config.enabled_tc & BIT(i)) {
1768 			offset = vsi->mqprio_qopt.qopt.offset[i];
1769 			qcount = vsi->mqprio_qopt.qopt.count[i];
1770 			if (qcount > max_qcount)
1771 				max_qcount = qcount;
1772 			vsi->tc_config.tc_info[i].qoffset = offset;
1773 			vsi->tc_config.tc_info[i].qcount = qcount;
1774 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1775 		} else {
1776 			/* TC is not enabled so set the offset to
1777 			 * default queue and allocate one queue
1778 			 * for the given TC.
1779 			 */
1780 			vsi->tc_config.tc_info[i].qoffset = 0;
1781 			vsi->tc_config.tc_info[i].qcount = 1;
1782 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1783 		}
1784 	}
1785 
1786 	/* Set actual Tx/Rx queue pairs */
1787 	vsi->num_queue_pairs = offset + qcount;
1788 
1789 	/* Setup queue TC[0].qmap for given VSI context */
1790 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1791 	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1792 	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1793 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1794 
1795 	/* Reconfigure RSS for main VSI with max queue count */
1796 	vsi->rss_size = max_qcount;
1797 	ret = i40e_vsi_config_rss(vsi);
1798 	if (ret) {
1799 		dev_info(&vsi->back->pdev->dev,
1800 			 "Failed to reconfig rss for num_queues (%u)\n",
1801 			 max_qcount);
1802 		return ret;
1803 	}
1804 	vsi->reconfig_rss = true;
1805 	dev_dbg(&vsi->back->pdev->dev,
1806 		"Reconfigured rss with num_queues (%u)\n", max_qcount);
1807 
1808 	/* Find queue count available for channel VSIs and starting offset
1809 	 * for channel VSIs
1810 	 */
1811 	override_q = vsi->mqprio_qopt.qopt.count[0];
1812 	if (override_q && override_q < vsi->num_queue_pairs) {
1813 		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1814 		vsi->next_base_queue = override_q;
1815 	}
1816 	return 0;
1817 }
1818 
1819 /**
1820  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1821  * @vsi: the VSI being setup
1822  * @ctxt: VSI context structure
1823  * @enabled_tc: Enabled TCs bitmap
1824  * @is_add: True if called before Add VSI
1825  *
1826  * Setup VSI queue mapping for enabled traffic classes.
1827  **/
1828 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1829 				     struct i40e_vsi_context *ctxt,
1830 				     u8 enabled_tc,
1831 				     bool is_add)
1832 {
1833 	struct i40e_pf *pf = vsi->back;
1834 	u16 num_tc_qps = 0;
1835 	u16 sections = 0;
1836 	u8 netdev_tc = 0;
1837 	u16 numtc = 1;
1838 	u16 qcount;
1839 	u8 offset;
1840 	u16 qmap;
1841 	int i;
1842 
1843 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1844 	offset = 0;
1845 	/* zero out queue mapping; it will be updated at the end of the function */
1846 	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
1847 
1848 	if (vsi->type == I40E_VSI_MAIN) {
1849 		/* This code helps add more queues to the VSI if we have
1850 		 * more cores than RSS can support; the higher cores will
1851 		 * be served by ATR or other filters. Furthermore, a
1852 		 * non-zero req_queue_pairs means the user requested a new
1853 		 * queue count via ethtool's set_channels, so use that
1854 		 * value for queue distribution across traffic classes.
1855 		 */
1856 		if (vsi->req_queue_pairs > 0)
1857 			vsi->num_queue_pairs = vsi->req_queue_pairs;
1858 		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1859 			vsi->num_queue_pairs = pf->num_lan_msix;
1860 	}
1861 
1862 	/* Number of queues per enabled TC */
1863 	if (vsi->type == I40E_VSI_MAIN ||
1864 	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
1865 		num_tc_qps = vsi->num_queue_pairs;
1866 	else
1867 		num_tc_qps = vsi->alloc_queue_pairs;
1868 
1869 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1870 		/* Find numtc from enabled TC bitmap */
1871 		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1872 			if (enabled_tc & BIT(i)) /* TC is enabled */
1873 				numtc++;
1874 		}
1875 		if (!numtc) {
1876 			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1877 			numtc = 1;
1878 		}
1879 		num_tc_qps = num_tc_qps / numtc;
1880 		num_tc_qps = min_t(int, num_tc_qps,
1881 				   i40e_pf_get_max_q_per_tc(pf));
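		/* Worked example (numbers purely illustrative): with 16 queue
		 * pairs available and 4 TCs enabled, each TC is given
		 * 16 / 4 = 4 queue pairs, clamped by the per-TC maximum
		 * obtained from i40e_pf_get_max_q_per_tc() just above.
		 */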
1882 	}
1883 
1884 	vsi->tc_config.numtc = numtc;
1885 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1886 
1887 	/* Do not allow using more TC queue pairs than there are MSI-X vectors */
1888 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1889 		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1890 
1891 	/* Setup queue offset/count for all TCs for given VSI */
1892 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1893 		/* See if the given TC is enabled for the given VSI */
1894 		if (vsi->tc_config.enabled_tc & BIT(i)) {
1895 			/* TC is enabled */
1896 			int pow, num_qps;
1897 
1898 			switch (vsi->type) {
1899 			case I40E_VSI_MAIN:
1900 				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1901 				    I40E_FLAG_FD_ATR_ENABLED)) ||
1902 				    vsi->tc_config.enabled_tc != 1) {
1903 					qcount = min_t(int, pf->alloc_rss_size,
1904 						       num_tc_qps);
1905 					break;
1906 				}
1907 				fallthrough;
1908 			case I40E_VSI_FDIR:
1909 			case I40E_VSI_SRIOV:
1910 			case I40E_VSI_VMDQ2:
1911 			default:
1912 				qcount = num_tc_qps;
1913 				WARN_ON(i != 0);
1914 				break;
1915 			}
1916 			vsi->tc_config.tc_info[i].qoffset = offset;
1917 			vsi->tc_config.tc_info[i].qcount = qcount;
1918 
1919 			/* find the next higher power-of-2 of num queue pairs */
1920 			num_qps = qcount;
1921 			pow = 0;
1922 			while (num_qps && (BIT_ULL(pow) < qcount)) {
1923 				pow++;
1924 				num_qps >>= 1;
1925 			}
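			/* Example for illustration: qcount = 3 leaves the loop
			 * with pow = 2, so the qmap below describes this TC as
			 * spanning 2^2 = 4 queues.
			 */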
1926 
1927 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1928 			qmap =
1929 			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1930 			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1931 
1932 			offset += qcount;
1933 		} else {
1934 			/* TC is not enabled so set the offset to
1935 			 * default queue and allocate one queue
1936 			 * for the given TC.
1937 			 */
1938 			vsi->tc_config.tc_info[i].qoffset = 0;
1939 			vsi->tc_config.tc_info[i].qcount = 1;
1940 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1941 
1942 			qmap = 0;
1943 		}
1944 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1945 	}
1946 	/* Do not change previously set num_queue_pairs for PFs and VFs */
1947 	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
1948 	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
1949 	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
1950 		vsi->num_queue_pairs = offset;
1951 
1952 	/* Scheduler section valid can only be set for ADD VSI */
1953 	if (is_add) {
1954 		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1955 
1956 		ctxt->info.up_enable_bits = enabled_tc;
1957 	}
1958 	if (vsi->type == I40E_VSI_SRIOV) {
1959 		ctxt->info.mapping_flags |=
1960 				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1961 		for (i = 0; i < vsi->num_queue_pairs; i++)
1962 			ctxt->info.queue_mapping[i] =
1963 					       cpu_to_le16(vsi->base_queue + i);
1964 	} else {
1965 		ctxt->info.mapping_flags |=
1966 					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1967 		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1968 	}
1969 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1970 }
1971 
1972 /**
1973  * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1974  * @netdev: the netdevice
1975  * @addr: address to add
1976  *
1977  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1978  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1979  */
1980 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1981 {
1982 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1983 	struct i40e_vsi *vsi = np->vsi;
1984 
1985 	if (i40e_add_mac_filter(vsi, addr))
1986 		return 0;
1987 	else
1988 		return -ENOMEM;
1989 }
1990 
1991 /**
1992  * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1993  * @netdev: the netdevice
1994  * @addr: address to add
1995  *
1996  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1997  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1998  */
1999 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
2000 {
2001 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2002 	struct i40e_vsi *vsi = np->vsi;
2003 
2004 	/* Under some circumstances, we might receive a request to delete
2005 	 * our own device address from our uc list. Because we store the
2006 	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
2007 	 * such requests and not delete our device address from this list.
2008 	 */
2009 	if (ether_addr_equal(addr, netdev->dev_addr))
2010 		return 0;
2011 
2012 	i40e_del_mac_filter(vsi, addr);
2013 
2014 	return 0;
2015 }
2016 
2017 /**
2018  * i40e_set_rx_mode - NDO callback to set the netdev filters
2019  * @netdev: network interface device structure
2020  **/
2021 static void i40e_set_rx_mode(struct net_device *netdev)
2022 {
2023 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2024 	struct i40e_vsi *vsi = np->vsi;
2025 
2026 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2027 
2028 	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2029 	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2030 
2031 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2032 
2033 	/* check for other flag changes */
2034 	if (vsi->current_netdev_flags != vsi->netdev->flags) {
2035 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2036 		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2037 	}
2038 }
2039 
2040 /**
2041  * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2042  * @vsi: Pointer to VSI struct
2043  * @from: Pointer to list which contains MAC filter entries - changes to
2044  *        those entries need to be undone.
2045  *
2046  * MAC filter entries from this list were slated for deletion.
2047  **/
2048 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2049 					 struct hlist_head *from)
2050 {
2051 	struct i40e_mac_filter *f;
2052 	struct hlist_node *h;
2053 
2054 	hlist_for_each_entry_safe(f, h, from, hlist) {
2055 		u64 key = i40e_addr_to_hkey(f->macaddr);
2056 
2057 		/* Move the element back into the MAC filter list */
2058 		hlist_del(&f->hlist);
2059 		hash_add(vsi->mac_filter_hash, &f->hlist, key);
2060 	}
2061 }
2062 
2063 /**
2064  * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2065  * @vsi: Pointer to VSI struct
2066  * @from: Pointer to list which contains MAC filter entries - changes to
2067  *        those entries need to be undone.
2068  *
2069  * MAC filter entries from this list were slated for addition.
2070  **/
2071 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2072 					 struct hlist_head *from)
2073 {
2074 	struct i40e_new_mac_filter *new;
2075 	struct hlist_node *h;
2076 
2077 	hlist_for_each_entry_safe(new, h, from, hlist) {
2078 		/* We can simply free the wrapper structure */
2079 		hlist_del(&new->hlist);
2080 		netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2081 		kfree(new);
2082 	}
2083 }
2084 
2085 /**
2086  * i40e_next_filter - Get the next non-broadcast filter from a list
2087  * @next: pointer to filter in list
2088  *
2089  * Returns the next non-broadcast filter in the list. Required so that we
2090  * ignore broadcast filters within the list, since these are not handled via
2091  * the normal firmware update path.
2092  */
2093 static
2094 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2095 {
2096 	hlist_for_each_entry_continue(next, hlist) {
2097 		if (!is_broadcast_ether_addr(next->f->macaddr))
2098 			return next;
2099 	}
2100 
2101 	return NULL;
2102 }
2103 
2104 /**
2105  * i40e_update_filter_state - Update filter state based on return data
2106  * from firmware
2107  * @count: Number of filters added
2108  * @add_list: return data from fw
2109  * @add_head: pointer to first filter in current batch
2110  *
2111  * MAC filter entries from list were slated to be added to device. Returns
2112  * number of successful filters. Note that 0 does NOT mean success!
2113  **/
2114 static int
2115 i40e_update_filter_state(int count,
2116 			 struct i40e_aqc_add_macvlan_element_data *add_list,
2117 			 struct i40e_new_mac_filter *add_head)
2118 {
2119 	int retval = 0;
2120 	int i;
2121 
2122 	for (i = 0; i < count; i++) {
2123 		/* Always check status of each filter. We don't need to check
2124 		 * the firmware return status because we pre-set the filter
2125 		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2126 		 * request to the adminq. Thus, if it no longer matches then
2127 		 * we know the filter is active.
2128 		 */
2129 		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2130 			add_head->state = I40E_FILTER_FAILED;
2131 		} else {
2132 			add_head->state = I40E_FILTER_ACTIVE;
2133 			retval++;
2134 		}
2135 
2136 		add_head = i40e_next_filter(add_head);
2137 		if (!add_head)
2138 			break;
2139 	}
2140 
2141 	return retval;
2142 }
2143 
2144 /**
2145  * i40e_aqc_del_filters - Request firmware to delete a set of filters
2146  * @vsi: ptr to the VSI
2147  * @vsi_name: name to display in messages
2148  * @list: the list of filters to send to firmware
2149  * @num_del: the number of filters to delete
2150  * @retval: Set to -EIO on failure to delete
2151  *
2152  * Send a request to firmware via AdminQ to delete a set of filters. Uses
2153  * *retval instead of a return value so that success does not force *retval to
2154  * be set to 0. This ensures that a sequence of calls to this function
2155  * preserves the previous value of *retval on successful delete.
2156  */
2157 static
2158 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2159 			  struct i40e_aqc_remove_macvlan_element_data *list,
2160 			  int num_del, int *retval)
2161 {
2162 	struct i40e_hw *hw = &vsi->back->hw;
2163 	enum i40e_admin_queue_err aq_status;
2164 	i40e_status aq_ret;
2165 
2166 	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2167 					   &aq_status);
2168 
2169 	/* Explicitly ignore and do not report when firmware returns ENOENT */
2170 	if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
2171 		*retval = -EIO;
2172 		dev_info(&vsi->back->pdev->dev,
2173 			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2174 			 vsi_name, i40e_stat_str(hw, aq_ret),
2175 			 i40e_aq_str(hw, aq_status));
2176 	}
2177 }
2178 
2179 /**
2180  * i40e_aqc_add_filters - Request firmware to add a set of filters
2181  * @vsi: ptr to the VSI
2182  * @vsi_name: name to display in messages
2183  * @list: the list of filters to send to firmware
2184  * @add_head: Position in the add hlist
2185  * @num_add: the number of filters to add
2186  *
2187  * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2188  * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2189  * space for more filters.
2190  */
2191 static
2192 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2193 			  struct i40e_aqc_add_macvlan_element_data *list,
2194 			  struct i40e_new_mac_filter *add_head,
2195 			  int num_add)
2196 {
2197 	struct i40e_hw *hw = &vsi->back->hw;
2198 	enum i40e_admin_queue_err aq_status;
2199 	int fcnt;
2200 
2201 	i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2202 	fcnt = i40e_update_filter_state(num_add, list, add_head);
2203 
2204 	if (fcnt != num_add) {
2205 		if (vsi->type == I40E_VSI_MAIN) {
2206 			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2207 			dev_warn(&vsi->back->pdev->dev,
2208 				 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2209 				 i40e_aq_str(hw, aq_status), vsi_name);
2210 		} else if (vsi->type == I40E_VSI_SRIOV ||
2211 			   vsi->type == I40E_VSI_VMDQ1 ||
2212 			   vsi->type == I40E_VSI_VMDQ2) {
2213 			dev_warn(&vsi->back->pdev->dev,
2214 				 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2215 				 i40e_aq_str(hw, aq_status), vsi_name,
2216 					     vsi_name);
2217 		} else {
2218 			dev_warn(&vsi->back->pdev->dev,
2219 				 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2220 				 i40e_aq_str(hw, aq_status), vsi_name,
2221 					     vsi->type);
2222 		}
2223 	}
2224 }
2225 
2226 /**
2227  * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2228  * @vsi: pointer to the VSI
2229  * @vsi_name: the VSI name
2230  * @f: filter data
2231  *
2232  * This function sets or clears the promiscuous broadcast flags for VLAN
2233  * filters in order to properly receive broadcast frames. Assumes that only
2234  * broadcast filters are passed.
2235  *
2236  * Returns status indicating success or failure.
2237  **/
2238 static i40e_status
2239 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2240 			  struct i40e_mac_filter *f)
2241 {
2242 	bool enable = f->state == I40E_FILTER_NEW;
2243 	struct i40e_hw *hw = &vsi->back->hw;
2244 	i40e_status aq_ret;
2245 
2246 	if (f->vlan == I40E_VLAN_ANY) {
2247 		aq_ret = i40e_aq_set_vsi_broadcast(hw,
2248 						   vsi->seid,
2249 						   enable,
2250 						   NULL);
2251 	} else {
2252 		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2253 							    vsi->seid,
2254 							    enable,
2255 							    f->vlan,
2256 							    NULL);
2257 	}
2258 
2259 	if (aq_ret) {
2260 		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2261 		dev_warn(&vsi->back->pdev->dev,
2262 			 "Error %s, forcing overflow promiscuous on %s\n",
2263 			 i40e_aq_str(hw, hw->aq.asq_last_status),
2264 			 vsi_name);
2265 	}
2266 
2267 	return aq_ret;
2268 }
2269 
2270 /**
2271  * i40e_set_promiscuous - set promiscuous mode
2272  * @pf: board private structure
2273  * @promisc: promisc on or off
2274  *
2275  * There are different ways of setting promiscuous mode on a PF depending on
2276  * what state/environment we're in.  This identifies and sets it appropriately.
2277  * Returns 0 on success.
2278  **/
2279 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2280 {
2281 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2282 	struct i40e_hw *hw = &pf->hw;
2283 	i40e_status aq_ret;
2284 
2285 	if (vsi->type == I40E_VSI_MAIN &&
2286 	    pf->lan_veb != I40E_NO_VEB &&
2287 	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2288 		/* Set defport ON for the Main VSI instead of true promisc;
2289 		 * this way we will get all unicast/multicast and VLAN
2290 		 * promisc behavior but will not get VF or VMDq traffic
2291 		 * replicated on the Main VSI.
2292 		 */
2293 		if (promisc)
2294 			aq_ret = i40e_aq_set_default_vsi(hw,
2295 							 vsi->seid,
2296 							 NULL);
2297 		else
2298 			aq_ret = i40e_aq_clear_default_vsi(hw,
2299 							   vsi->seid,
2300 							   NULL);
2301 		if (aq_ret) {
2302 			dev_info(&pf->pdev->dev,
2303 				 "Set default VSI failed, err %s, aq_err %s\n",
2304 				 i40e_stat_str(hw, aq_ret),
2305 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2306 		}
2307 	} else {
2308 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2309 						  hw,
2310 						  vsi->seid,
2311 						  promisc, NULL,
2312 						  true);
2313 		if (aq_ret) {
2314 			dev_info(&pf->pdev->dev,
2315 				 "set unicast promisc failed, err %s, aq_err %s\n",
2316 				 i40e_stat_str(hw, aq_ret),
2317 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2318 		}
2319 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2320 						  hw,
2321 						  vsi->seid,
2322 						  promisc, NULL);
2323 		if (aq_ret) {
2324 			dev_info(&pf->pdev->dev,
2325 				 "set multicast promisc failed, err %s, aq_err %s\n",
2326 				 i40e_stat_str(hw, aq_ret),
2327 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2328 		}
2329 	}
2330 
2331 	if (!aq_ret)
2332 		pf->cur_promisc = promisc;
2333 
2334 	return aq_ret;
2335 }
2336 
2337 /**
2338  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2339  * @vsi: ptr to the VSI
2340  *
2341  * Push any outstanding VSI filter changes through the AdminQ.
2342  *
2343  * Returns 0 or error value
2344  **/
2345 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2346 {
2347 	struct hlist_head tmp_add_list, tmp_del_list;
2348 	struct i40e_mac_filter *f;
2349 	struct i40e_new_mac_filter *new, *add_head = NULL;
2350 	struct i40e_hw *hw = &vsi->back->hw;
2351 	bool old_overflow, new_overflow;
2352 	unsigned int failed_filters = 0;
2353 	unsigned int vlan_filters = 0;
2354 	char vsi_name[16] = "PF";
2355 	int filter_list_len = 0;
2356 	i40e_status aq_ret = 0;
2357 	u32 changed_flags = 0;
2358 	struct hlist_node *h;
2359 	struct i40e_pf *pf;
2360 	int num_add = 0;
2361 	int num_del = 0;
2362 	int retval = 0;
2363 	u16 cmd_flags;
2364 	int list_size;
2365 	int bkt;
2366 
2367 	/* empty array-typed pointers, allocated with kzalloc later */
2368 	struct i40e_aqc_add_macvlan_element_data *add_list;
2369 	struct i40e_aqc_remove_macvlan_element_data *del_list;
2370 
2371 	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2372 		usleep_range(1000, 2000);
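	/* Only one filter sync may run on a VSI at a time; the loop above
	 * sleeps 1-2 ms per attempt until the SYNCING flag can be claimed.
	 */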
2373 	pf = vsi->back;
2374 
2375 	old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2376 
2377 	if (vsi->netdev) {
2378 		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2379 		vsi->current_netdev_flags = vsi->netdev->flags;
2380 	}
2381 
2382 	INIT_HLIST_HEAD(&tmp_add_list);
2383 	INIT_HLIST_HEAD(&tmp_del_list);
2384 
2385 	if (vsi->type == I40E_VSI_SRIOV)
2386 		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2387 	else if (vsi->type != I40E_VSI_MAIN)
2388 		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2389 
2390 	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2391 		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2392 
2393 		spin_lock_bh(&vsi->mac_filter_hash_lock);
2394 		/* Create a list of filters to delete. */
2395 		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2396 			if (f->state == I40E_FILTER_REMOVE) {
2397 				/* Move the element into temporary del_list */
2398 				hash_del(&f->hlist);
2399 				hlist_add_head(&f->hlist, &tmp_del_list);
2400 
2401 				/* Avoid counting removed filters */
2402 				continue;
2403 			}
2404 			if (f->state == I40E_FILTER_NEW) {
2405 				/* Create a temporary i40e_new_mac_filter */
2406 				new = kzalloc(sizeof(*new), GFP_ATOMIC);
2407 				if (!new)
2408 					goto err_no_memory_locked;
2409 
2410 				/* Store pointer to the real filter */
2411 				new->f = f;
2412 				new->state = f->state;
2413 
2414 				/* Add it to the hash list */
2415 				hlist_add_head(&new->hlist, &tmp_add_list);
2416 			}
2417 
2418 			/* Count the number of active (current and new) VLAN
2419 			 * filters we have now. Does not count filters which
2420 			 * are marked for deletion.
2421 			 */
2422 			if (f->vlan > 0)
2423 				vlan_filters++;
2424 		}
2425 
2426 		retval = i40e_correct_mac_vlan_filters(vsi,
2427 						       &tmp_add_list,
2428 						       &tmp_del_list,
2429 						       vlan_filters);
2430 
2431 		hlist_for_each_entry(new, &tmp_add_list, hlist)
2432 			netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2433 
2434 		if (retval)
2435 			goto err_no_memory_locked;
2436 
2437 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2438 	}
2439 
2440 	/* Now process 'del_list' outside the lock */
2441 	if (!hlist_empty(&tmp_del_list)) {
2442 		filter_list_len = hw->aq.asq_buf_size /
2443 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
2444 		list_size = filter_list_len *
2445 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
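		/* Sizing sketch (assuming the usual 4 KB AdminQ buffer and
		 * 16-byte remove elements): one request can carry about 256
		 * deletions, after which the buffer is flushed and reused.
		 */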
2446 		del_list = kzalloc(list_size, GFP_ATOMIC);
2447 		if (!del_list)
2448 			goto err_no_memory;
2449 
2450 		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2451 			cmd_flags = 0;
2452 
2453 			/* handle broadcast filters by updating the broadcast
2454 			 * promiscuous flag and releasing the filter entry.
2455 			 */
2456 			if (is_broadcast_ether_addr(f->macaddr)) {
2457 				i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2458 
2459 				hlist_del(&f->hlist);
2460 				kfree(f);
2461 				continue;
2462 			}
2463 
2464 			/* add to delete list */
2465 			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2466 			if (f->vlan == I40E_VLAN_ANY) {
2467 				del_list[num_del].vlan_tag = 0;
2468 				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2469 			} else {
2470 				del_list[num_del].vlan_tag =
2471 					cpu_to_le16((u16)(f->vlan));
2472 			}
2473 
2474 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2475 			del_list[num_del].flags = cmd_flags;
2476 			num_del++;
2477 
2478 			/* flush a full buffer */
2479 			if (num_del == filter_list_len) {
2480 				i40e_aqc_del_filters(vsi, vsi_name, del_list,
2481 						     num_del, &retval);
2482 				memset(del_list, 0, list_size);
2483 				num_del = 0;
2484 			}
2485 			/* Release memory for MAC filter entries which were
2486 			 * synced up with HW.
2487 			 */
2488 			hlist_del(&f->hlist);
2489 			kfree(f);
2490 		}
2491 
2492 		if (num_del) {
2493 			i40e_aqc_del_filters(vsi, vsi_name, del_list,
2494 					     num_del, &retval);
2495 		}
2496 
2497 		kfree(del_list);
2498 		del_list = NULL;
2499 	}
2500 
2501 	if (!hlist_empty(&tmp_add_list)) {
2502 		/* Do all the adds now. */
2503 		filter_list_len = hw->aq.asq_buf_size /
2504 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
2505 		list_size = filter_list_len *
2506 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
2507 		add_list = kzalloc(list_size, GFP_ATOMIC);
2508 		if (!add_list)
2509 			goto err_no_memory;
2510 
2511 		num_add = 0;
2512 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2513 			/* handle broadcast filters by updating the broadcast
2514 			 * promiscuous flag instead of adding a MAC filter.
2515 			 */
2516 			if (is_broadcast_ether_addr(new->f->macaddr)) {
2517 				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2518 							      new->f))
2519 					new->state = I40E_FILTER_FAILED;
2520 				else
2521 					new->state = I40E_FILTER_ACTIVE;
2522 				continue;
2523 			}
2524 
2525 			/* add to add array */
2526 			if (num_add == 0)
2527 				add_head = new;
2528 			cmd_flags = 0;
2529 			ether_addr_copy(add_list[num_add].mac_addr,
2530 					new->f->macaddr);
2531 			if (new->f->vlan == I40E_VLAN_ANY) {
2532 				add_list[num_add].vlan_tag = 0;
2533 				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2534 			} else {
2535 				add_list[num_add].vlan_tag =
2536 					cpu_to_le16((u16)(new->f->vlan));
2537 			}
2538 			add_list[num_add].queue_number = 0;
2539 			/* set invalid match method for later detection */
2540 			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2541 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2542 			add_list[num_add].flags = cpu_to_le16(cmd_flags);
2543 			num_add++;
2544 
2545 			/* flush a full buffer */
2546 			if (num_add == filter_list_len) {
2547 				i40e_aqc_add_filters(vsi, vsi_name, add_list,
2548 						     add_head, num_add);
2549 				memset(add_list, 0, list_size);
2550 				num_add = 0;
2551 			}
2552 		}
2553 		if (num_add) {
2554 			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2555 					     num_add);
2556 		}
2557 		/* Now move all of the filters from the temp add list back to
2558 		 * the VSI's list.
2559 		 */
2560 		spin_lock_bh(&vsi->mac_filter_hash_lock);
2561 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2562 			/* Only update the state if we're still NEW */
2563 			if (new->f->state == I40E_FILTER_NEW)
2564 				new->f->state = new->state;
2565 			hlist_del(&new->hlist);
2566 			netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2567 			kfree(new);
2568 		}
2569 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2570 		kfree(add_list);
2571 		add_list = NULL;
2572 	}
2573 
2574 	/* Determine the number of active and failed filters. */
2575 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2576 	vsi->active_filters = 0;
2577 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2578 		if (f->state == I40E_FILTER_ACTIVE)
2579 			vsi->active_filters++;
2580 		else if (f->state == I40E_FILTER_FAILED)
2581 			failed_filters++;
2582 	}
2583 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2584 
2585 	/* Check if we are able to exit overflow promiscuous mode. We can
2586 	 * safely exit if we didn't just enter, we no longer have any failed
2587 	 * filters, and we have reduced filters below the threshold value.
2588 	 */
2589 	if (old_overflow && !failed_filters &&
2590 	    vsi->active_filters < vsi->promisc_threshold) {
2591 		dev_info(&pf->pdev->dev,
2592 			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2593 			 vsi_name);
2594 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2595 		vsi->promisc_threshold = 0;
2596 	}
2597 
2598 	/* if the VF is not trusted, do not allow promiscuous mode */
2599 	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2600 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2601 		goto out;
2602 	}
2603 
2604 	new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2605 
2606 	/* If we are entering overflow promiscuous, we need to calculate a new
2607 	 * threshold for when we are safe to exit
2608 	 */
2609 	if (!old_overflow && new_overflow)
2610 		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
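	/* For example (numbers purely illustrative): entering overflow with
	 * 200 active filters sets the threshold to 150, so overflow
	 * promiscuous mode is left only once the VSI is back under 150
	 * active filters with no failed filters.
	 */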
2611 
2612 	/* check for changes in promiscuous modes */
2613 	if (changed_flags & IFF_ALLMULTI) {
2614 		bool cur_multipromisc;
2615 
2616 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2617 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2618 							       vsi->seid,
2619 							       cur_multipromisc,
2620 							       NULL);
2621 		if (aq_ret) {
2622 			retval = i40e_aq_rc_to_posix(aq_ret,
2623 						     hw->aq.asq_last_status);
2624 			dev_info(&pf->pdev->dev,
2625 				 "set multi promisc failed on %s, err %s aq_err %s\n",
2626 				 vsi_name,
2627 				 i40e_stat_str(hw, aq_ret),
2628 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2629 		} else {
2630 			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2631 				 cur_multipromisc ? "entering" : "leaving");
2632 		}
2633 	}
2634 
2635 	if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2636 		bool cur_promisc;
2637 
2638 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2639 			       new_overflow);
2640 		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2641 		if (aq_ret) {
2642 			retval = i40e_aq_rc_to_posix(aq_ret,
2643 						     hw->aq.asq_last_status);
2644 			dev_info(&pf->pdev->dev,
2645 				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2646 				 cur_promisc ? "on" : "off",
2647 				 vsi_name,
2648 				 i40e_stat_str(hw, aq_ret),
2649 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2650 		}
2651 	}
2652 out:
2653 	/* if something went wrong then set the changed flag so we try again */
2654 	if (retval)
2655 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2656 
2657 	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2658 	return retval;
2659 
2660 err_no_memory:
2661 	/* Restore elements on the temporary add and delete lists */
2662 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2663 err_no_memory_locked:
2664 	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2665 	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2666 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2667 
2668 	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2669 	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2670 	return -ENOMEM;
2671 }
2672 
2673 /**
2674  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2675  * @pf: board private structure
2676  **/
2677 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2678 {
2679 	int v;
2680 
2681 	if (!pf)
2682 		return;
2683 	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2684 		return;
2685 	if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2686 		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2687 		return;
2688 	}
2689 
2690 	for (v = 0; v < pf->num_alloc_vsi; v++) {
2691 		if (pf->vsi[v] &&
2692 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2693 		    !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2694 			int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2695 
2696 			if (ret) {
2697 				/* come back and try again later */
2698 				set_bit(__I40E_MACVLAN_SYNC_PENDING,
2699 					pf->state);
2700 				break;
2701 			}
2702 		}
2703 	}
2704 }
2705 
2706 /**
2707  * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2708  * @vsi: the vsi
2709  **/
2710 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2711 {
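	/* With 8 KB or larger pages, or when legacy-rx is requested, Rx uses
	 * 2K buffers; otherwise 3K buffers are available, and that single
	 * buffer size bounds the largest frame XDP can be handed.
	 */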
2712 	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2713 		return I40E_RXBUFFER_2048;
2714 	else
2715 		return I40E_RXBUFFER_3072;
2716 }
2717 
2718 /**
2719  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2720  * @netdev: network interface device structure
2721  * @new_mtu: new value for maximum frame size
2722  *
2723  * Returns 0 on success, negative on failure
2724  **/
2725 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2726 {
2727 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2728 	struct i40e_vsi *vsi = np->vsi;
2729 	struct i40e_pf *pf = vsi->back;
2730 
2731 	if (i40e_enabled_xdp_vsi(vsi)) {
2732 		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2733 
2734 		if (frame_size > i40e_max_xdp_frame_size(vsi))
2735 			return -EINVAL;
2736 	}
2737 
2738 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
2739 		   netdev->mtu, new_mtu);
2740 	netdev->mtu = new_mtu;
2741 	if (netif_running(netdev))
2742 		i40e_vsi_reinit_locked(vsi);
2743 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2744 	set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2745 	return 0;
2746 }
2747 
2748 /**
2749  * i40e_ioctl - Access the hwtstamp interface
2750  * @netdev: network interface device structure
2751  * @ifr: interface request data
2752  * @cmd: ioctl command
2753  **/
2754 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2755 {
2756 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2757 	struct i40e_pf *pf = np->vsi->back;
2758 
2759 	switch (cmd) {
2760 	case SIOCGHWTSTAMP:
2761 		return i40e_ptp_get_ts_config(pf, ifr);
2762 	case SIOCSHWTSTAMP:
2763 		return i40e_ptp_set_ts_config(pf, ifr);
2764 	default:
2765 		return -EOPNOTSUPP;
2766 	}
2767 }
2768 
2769 /**
2770  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2771  * @vsi: the vsi being adjusted
2772  **/
2773 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2774 {
2775 	struct i40e_vsi_context ctxt;
2776 	i40e_status ret;
2777 
2778 	/* Don't modify stripping options if a port VLAN is active */
2779 	if (vsi->info.pvid)
2780 		return;
2781 
2782 	if ((vsi->info.valid_sections &
2783 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2784 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2785 		return;  /* already enabled */
2786 
2787 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2788 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2789 				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2790 
2791 	ctxt.seid = vsi->seid;
2792 	ctxt.info = vsi->info;
2793 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2794 	if (ret) {
2795 		dev_info(&vsi->back->pdev->dev,
2796 			 "update vlan stripping failed, err %s aq_err %s\n",
2797 			 i40e_stat_str(&vsi->back->hw, ret),
2798 			 i40e_aq_str(&vsi->back->hw,
2799 				     vsi->back->hw.aq.asq_last_status));
2800 	}
2801 }
2802 
2803 /**
2804  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2805  * @vsi: the vsi being adjusted
2806  **/
2807 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2808 {
2809 	struct i40e_vsi_context ctxt;
2810 	i40e_status ret;
2811 
2812 	/* Don't modify stripping options if a port VLAN is active */
2813 	if (vsi->info.pvid)
2814 		return;
2815 
2816 	if ((vsi->info.valid_sections &
2817 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2818 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2819 	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
2820 		return;  /* already disabled */
2821 
2822 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2823 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2824 				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2825 
2826 	ctxt.seid = vsi->seid;
2827 	ctxt.info = vsi->info;
2828 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2829 	if (ret) {
2830 		dev_info(&vsi->back->pdev->dev,
2831 			 "update vlan stripping failed, err %s aq_err %s\n",
2832 			 i40e_stat_str(&vsi->back->hw, ret),
2833 			 i40e_aq_str(&vsi->back->hw,
2834 				     vsi->back->hw.aq.asq_last_status));
2835 	}
2836 }
2837 
2838 /**
2839  * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2840  * @vsi: the vsi being configured
2841  * @vid: vlan id to be added (0 = untagged only, -1 = any)
2842  *
2843  * This is a helper function for adding a new MAC/VLAN filter with the
2844  * specified VLAN for each existing MAC address already in the hash table.
2845  * This function does *not* perform any accounting to update filters based on
2846  * VLAN mode.
2847  *
2848  * NOTE: this function expects to be called while under the
2849  * mac_filter_hash_lock
2850  **/
2851 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2852 {
2853 	struct i40e_mac_filter *f, *add_f;
2854 	struct hlist_node *h;
2855 	int bkt;
2856 
2857 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2858 		if (f->state == I40E_FILTER_REMOVE)
2859 			continue;
2860 		add_f = i40e_add_filter(vsi, f->macaddr, vid);
2861 		if (!add_f) {
2862 			dev_info(&vsi->back->pdev->dev,
2863 				 "Could not add vlan filter %d for %pM\n",
2864 				 vid, f->macaddr);
2865 			return -ENOMEM;
2866 		}
2867 	}
2868 
2869 	return 0;
2870 }
2871 
2872 /**
2873  * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2874  * @vsi: the VSI being configured
2875  * @vid: VLAN id to be added
2876  **/
2877 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2878 {
2879 	int err;
2880 
2881 	if (vsi->info.pvid)
2882 		return -EINVAL;
2883 
2884 	/* The network stack will attempt to add VID=0, with the intention to
2885 	 * receive priority tagged packets with a VLAN of 0. Our HW receives
2886 	 * these packets by default when configured to receive untagged
2887 	 * packets, so we don't need to add a filter for this case.
2888 	 * Additionally, HW interprets adding a VID=0 filter as meaning to
2889 	 * receive *only* tagged traffic and stops receiving untagged traffic.
2890 	 * Thus, we do not want to actually add a filter for VID=0
2891 	 */
2892 	if (!vid)
2893 		return 0;
2894 
2895 	/* Lock once because all functions invoked below iterate the list */
2896 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2897 	err = i40e_add_vlan_all_mac(vsi, vid);
2898 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2899 	if (err)
2900 		return err;
2901 
2902 	/* schedule our worker thread which will take care of
2903 	 * applying the new filter changes
2904 	 */
2905 	i40e_service_event_schedule(vsi->back);
2906 	return 0;
2907 }
2908 
2909 /**
2910  * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2911  * @vsi: the vsi being configured
2912  * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2913  *
2914  * This function should be used to remove all VLAN filters which match the
2915  * given VID. It does not schedule the service event and does not take the
2916  * mac_filter_hash_lock so it may be combined with other operations under
2917  * a single invocation of the mac_filter_hash_lock.
2918  *
2919  * NOTE: this function expects to be called while under the
2920  * mac_filter_hash_lock
2921  */
2922 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2923 {
2924 	struct i40e_mac_filter *f;
2925 	struct hlist_node *h;
2926 	int bkt;
2927 
2928 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2929 		if (f->vlan == vid)
2930 			__i40e_del_filter(vsi, f);
2931 	}
2932 }
2933 
2934 /**
2935  * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2936  * @vsi: the VSI being configured
2937  * @vid: VLAN id to be removed
2938  **/
2939 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2940 {
2941 	if (!vid || vsi->info.pvid)
2942 		return;
2943 
2944 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2945 	i40e_rm_vlan_all_mac(vsi, vid);
2946 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2947 
2948 	/* schedule our worker thread which will take care of
2949 	 * applying the new filter changes
2950 	 */
2951 	i40e_service_event_schedule(vsi->back);
2952 }
2953 
2954 /**
2955  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2956  * @netdev: network interface to be adjusted
2957  * @proto: unused protocol value
2958  * @vid: vlan id to be added
2959  *
2960  * net_device_ops implementation for adding vlan ids
2961  **/
2962 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2963 				__always_unused __be16 proto, u16 vid)
2964 {
2965 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2966 	struct i40e_vsi *vsi = np->vsi;
2967 	int ret = 0;
2968 
2969 	if (vid >= VLAN_N_VID)
2970 		return -EINVAL;
2971 
2972 	ret = i40e_vsi_add_vlan(vsi, vid);
2973 	if (!ret)
2974 		set_bit(vid, vsi->active_vlans);
2975 
2976 	return ret;
2977 }
2978 
2979 /**
2980  * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2981  * @netdev: network interface to be adjusted
2982  * @proto: unused protocol value
2983  * @vid: vlan id to be added
2984  **/
2985 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2986 				    __always_unused __be16 proto, u16 vid)
2987 {
2988 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2989 	struct i40e_vsi *vsi = np->vsi;
2990 
2991 	if (vid >= VLAN_N_VID)
2992 		return;
2993 	set_bit(vid, vsi->active_vlans);
2994 }
2995 
2996 /**
2997  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2998  * @netdev: network interface to be adjusted
2999  * @proto: unused protocol value
3000  * @vid: vlan id to be removed
3001  *
3002  * net_device_ops implementation for removing vlan ids
3003  **/
3004 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3005 				 __always_unused __be16 proto, u16 vid)
3006 {
3007 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3008 	struct i40e_vsi *vsi = np->vsi;
3009 
3010 	/* The return code is ignored as there is nothing a user
3011 	 * can do about a failure to remove, and a log message was
3012 	 * already printed by the called function.
3013 	 */
3014 	i40e_vsi_kill_vlan(vsi, vid);
3015 
3016 	clear_bit(vid, vsi->active_vlans);
3017 
3018 	return 0;
3019 }
3020 
3021 /**
3022  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3023  * @vsi: the vsi being brought back up
3024  **/
3025 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3026 {
3027 	u16 vid;
3028 
3029 	if (!vsi->netdev)
3030 		return;
3031 
3032 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3033 		i40e_vlan_stripping_enable(vsi);
3034 	else
3035 		i40e_vlan_stripping_disable(vsi);
3036 
3037 	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3038 		i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3039 					vid);
3040 }
3041 
3042 /**
3043  * i40e_vsi_add_pvid - Add pvid for the VSI
3044  * @vsi: the vsi being adjusted
3045  * @vid: the vlan id to set as a PVID
3046  **/
3047 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3048 {
3049 	struct i40e_vsi_context ctxt;
3050 	i40e_status ret;
3051 
3052 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3053 	vsi->info.pvid = cpu_to_le16(vid);
3054 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3055 				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
3056 				    I40E_AQ_VSI_PVLAN_EMOD_STR;
3057 
3058 	ctxt.seid = vsi->seid;
3059 	ctxt.info = vsi->info;
3060 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3061 	if (ret) {
3062 		dev_info(&vsi->back->pdev->dev,
3063 			 "add pvid failed, err %s aq_err %s\n",
3064 			 i40e_stat_str(&vsi->back->hw, ret),
3065 			 i40e_aq_str(&vsi->back->hw,
3066 				     vsi->back->hw.aq.asq_last_status));
3067 		return -ENOENT;
3068 	}
3069 
3070 	return 0;
3071 }
3072 
3073 /**
3074  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3075  * @vsi: the vsi being adjusted
3076  *
3077  * Clears the PVID and disables VLAN stripping to return the VSI to normal operation
3078  **/
3079 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3080 {
3081 	vsi->info.pvid = 0;
3082 
3083 	i40e_vlan_stripping_disable(vsi);
3084 }
3085 
3086 /**
3087  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3088  * @vsi: ptr to the VSI
3089  *
3090  * If this function returns with an error, then it's possible one or
3091  * more of the rings is populated (while the rest are not).  It is the
3092  * caller's duty to clean those orphaned rings.
3093  *
3094  * Return 0 on success, negative on failure
3095  **/
3096 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3097 {
3098 	int i, err = 0;
3099 
3100 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3101 		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3102 
3103 	if (!i40e_enabled_xdp_vsi(vsi))
3104 		return err;
3105 
3106 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3107 		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3108 
3109 	return err;
3110 }
3111 
3112 /**
3113  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3114  * @vsi: ptr to the VSI
3115  *
3116  * Free VSI's transmit software resources
3117  **/
3118 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3119 {
3120 	int i;
3121 
3122 	if (vsi->tx_rings) {
3123 		for (i = 0; i < vsi->num_queue_pairs; i++)
3124 			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3125 				i40e_free_tx_resources(vsi->tx_rings[i]);
3126 	}
3127 
3128 	if (vsi->xdp_rings) {
3129 		for (i = 0; i < vsi->num_queue_pairs; i++)
3130 			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3131 				i40e_free_tx_resources(vsi->xdp_rings[i]);
3132 	}
3133 }
3134 
3135 /**
3136  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3137  * @vsi: ptr to the VSI
3138  *
3139  * If this function returns with an error, then it's possible one or
3140  * more of the rings is populated (while the rest are not).  It is the
3141  * caller's duty to clean those orphaned rings.
3142  *
3143  * Return 0 on success, negative on failure
3144  **/
3145 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3146 {
3147 	int i, err = 0;
3148 
3149 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3150 		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3151 	return err;
3152 }
3153 
3154 /**
3155  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3156  * @vsi: ptr to the VSI
3157  *
3158  * Free all receive software resources
3159  **/
3160 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3161 {
3162 	int i;
3163 
3164 	if (!vsi->rx_rings)
3165 		return;
3166 
3167 	for (i = 0; i < vsi->num_queue_pairs; i++)
3168 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3169 			i40e_free_rx_resources(vsi->rx_rings[i]);
3170 }
3171 
3172 /**
3173  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3174  * @ring: The Tx ring to configure
3175  *
3176  * This enables/disables XPS for a given Tx descriptor ring
3177  * based on the TCs enabled for the VSI that ring belongs to.
3178  **/
3179 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3180 {
3181 	int cpu;
3182 
3183 	if (!ring->q_vector || !ring->netdev || ring->ch)
3184 		return;
3185 
3186 	/* We only initialize XPS once, so as not to overwrite user settings */
3187 	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3188 		return;
3189 
3190 	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3191 	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3192 			    ring->queue_index);
3193 }
3194 
3195 /**
3196  * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and zero-copy are enabled
3197  * @ring: The Tx or Rx ring
3198  *
3199  * Returns the AF_XDP buffer pool or NULL.
3200  **/
3201 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3202 {
3203 	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3204 	int qid = ring->queue_index;
3205 
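	/* In this driver the XDP Tx rings are indexed after the regular
	 * queue pairs, so map an XDP ring back to its queue pair id before
	 * looking up the pool.
	 */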
3206 	if (ring_is_xdp(ring))
3207 		qid -= ring->vsi->alloc_queue_pairs;
3208 
3209 	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3210 		return NULL;
3211 
3212 	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3213 }
3214 
3215 /**
3216  * i40e_configure_tx_ring - Configure a transmit ring context and related settings
3217  * @ring: The Tx ring to configure
3218  *
3219  * Configure the Tx descriptor ring in the HMC context.
3220  **/
3221 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3222 {
3223 	struct i40e_vsi *vsi = ring->vsi;
3224 	u16 pf_q = vsi->base_queue + ring->queue_index;
3225 	struct i40e_hw *hw = &vsi->back->hw;
3226 	struct i40e_hmc_obj_txq tx_ctx;
3227 	i40e_status err = 0;
3228 	u32 qtx_ctl = 0;
3229 
3230 	if (ring_is_xdp(ring))
3231 		ring->xsk_pool = i40e_xsk_pool(ring);
3232 
3233 	/* some ATR related tx ring init */
3234 	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3235 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
3236 		ring->atr_count = 0;
3237 	} else {
3238 		ring->atr_sample_rate = 0;
3239 	}
3240 
3241 	/* configure XPS */
3242 	i40e_config_xps_tx_ring(ring);
3243 
3244 	/* clear the context structure first */
3245 	memset(&tx_ctx, 0, sizeof(tx_ctx));
3246 
3247 	tx_ctx.new_context = 1;
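	/* The HMC Tx queue context stores the ring base address in 128-byte
	 * units, hence the division below.
	 */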
3248 	tx_ctx.base = (ring->dma / 128);
3249 	tx_ctx.qlen = ring->count;
3250 	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3251 					       I40E_FLAG_FD_ATR_ENABLED));
3252 	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3253 	/* FDIR VSI tx ring can still use RS bit and writebacks */
3254 	if (vsi->type != I40E_VSI_FDIR)
3255 		tx_ctx.head_wb_ena = 1;
3256 	tx_ctx.head_wb_addr = ring->dma +
3257 			      (ring->count * sizeof(struct i40e_tx_desc));
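	/* Head write-back is placed right after the descriptors in the same
	 * DMA allocation; the Tx ring setup reserves room for it there.
	 */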
3258 
3259 	/* As part of VSI creation/update, FW allocates certain
3260 	 * Tx arbitration queue sets for each TC enabled for
3261 	 * the VSI. The FW returns the handles to these queue
3262 	 * sets as part of the response buffer to Add VSI,
3263 	 * Update VSI, etc. AQ commands. It is expected that
3264 	 * these queue set handles be associated with the Tx
3265 	 * queues by the driver as part of the TX queue context
3266 	 * initialization. This has to be done regardless of
3267 	 * DCB as by default everything is mapped to TC0.
3268 	 */
3269 
3270 	if (ring->ch)
3271 		tx_ctx.rdylist =
3272 			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3273 
3274 	else
3275 		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3276 
3277 	tx_ctx.rdylist_act = 0;
3278 
3279 	/* clear the context in the HMC */
3280 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3281 	if (err) {
3282 		dev_info(&vsi->back->pdev->dev,
3283 			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3284 			 ring->queue_index, pf_q, err);
3285 		return -ENOMEM;
3286 	}
3287 
3288 	/* set the context in the HMC */
3289 	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3290 	if (err) {
3291 		dev_info(&vsi->back->pdev->dev,
3292 			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3293 			 ring->queue_index, pf_q, err);
3294 		return -ENOMEM;
3295 	}
3296 
3297 	/* Now associate this queue with this PCI function */
3298 	if (ring->ch) {
3299 		if (ring->ch->type == I40E_VSI_VMDQ2)
3300 			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3301 		else
3302 			return -EINVAL;
3303 
3304 		qtx_ctl |= (ring->ch->vsi_number <<
3305 			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3306 			    I40E_QTX_CTL_VFVM_INDX_MASK;
3307 	} else {
3308 		if (vsi->type == I40E_VSI_VMDQ2) {
3309 			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3310 			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3311 				    I40E_QTX_CTL_VFVM_INDX_MASK;
3312 		} else {
3313 			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3314 		}
3315 	}
3316 
3317 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3318 		    I40E_QTX_CTL_PF_INDX_MASK);
3319 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3320 	i40e_flush(hw);
3321 
3322 	/* cache tail off for easier writes later */
3323 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3324 
3325 	return 0;
3326 }
3327 
3328 /**
3329  * i40e_rx_offset - Return expected offset into page to access data
3330  * @rx_ring: Ring we are requesting offset of
3331  *
3332  * Returns the offset value for ring into the data buffer.
3333  */
3334 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3335 {
3336 	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3337 }
3338 
3339 /**
3340  * i40e_configure_rx_ring - Configure a receive ring context
3341  * @ring: The Rx ring to configure
3342  *
3343  * Configure the Rx descriptor ring in the HMC context.
3344  **/
3345 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3346 {
3347 	struct i40e_vsi *vsi = ring->vsi;
3348 	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3349 	u16 pf_q = vsi->base_queue + ring->queue_index;
3350 	struct i40e_hw *hw = &vsi->back->hw;
3351 	struct i40e_hmc_obj_rxq rx_ctx;
3352 	i40e_status err = 0;
3353 	bool ok;
3354 	int ret;
3355 
3356 	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3357 
3358 	/* clear the context structure first */
3359 	memset(&rx_ctx, 0, sizeof(rx_ctx));
3360 
3361 	if (ring->vsi->type == I40E_VSI_MAIN)
3362 		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3363 
3364 	kfree(ring->rx_bi);
3365 	ring->xsk_pool = i40e_xsk_pool(ring);
3366 	if (ring->xsk_pool) {
3367 		ret = i40e_alloc_rx_bi_zc(ring);
3368 		if (ret)
3369 			return ret;
3370 		ring->rx_buf_len =
3371 		  xsk_pool_get_rx_frame_size(ring->xsk_pool);
		/* For AF_XDP ZC, we disallow packets to span multiple
		 * buffers, thus letting us skip that handling in the
		 * fast path.
		 */
3376 		chain_len = 1;
3377 		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3378 						 MEM_TYPE_XSK_BUFF_POOL,
3379 						 NULL);
3380 		if (ret)
3381 			return ret;
3382 		dev_info(&vsi->back->pdev->dev,
3383 			 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3384 			 ring->queue_index);
3385 
3386 	} else {
3387 		ret = i40e_alloc_rx_bi(ring);
3388 		if (ret)
3389 			return ret;
3390 		ring->rx_buf_len = vsi->rx_buf_len;
3391 		if (ring->vsi->type == I40E_VSI_MAIN) {
3392 			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3393 							 MEM_TYPE_PAGE_SHARED,
3394 							 NULL);
3395 			if (ret)
3396 				return ret;
3397 		}
3398 	}
3399 
3400 	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3401 				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
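	/* dbuff is programmed in units of BIT(I40E_RXQ_CTX_DBUFF_SHIFT)
	 * bytes; with 128-byte units, a 2048-byte buffer (illustrative)
	 * is written as dbuff = 16.
	 */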
3402 
3403 	rx_ctx.base = (ring->dma / 128);
3404 	rx_ctx.qlen = ring->count;
3405 
3406 	/* use 16 byte descriptors */
3407 	rx_ctx.dsize = 0;
3408 
3409 	/* descriptor type is always zero
3410 	 * rx_ctx.dtype = 0;
3411 	 */
3412 	rx_ctx.hsplit_0 = 0;
3413 
3414 	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3415 	if (hw->revision_id == 0)
3416 		rx_ctx.lrxqthresh = 0;
3417 	else
3418 		rx_ctx.lrxqthresh = 1;
3419 	rx_ctx.crcstrip = 1;
3420 	rx_ctx.l2tsel = 1;
3421 	/* this controls whether VLAN is stripped from inner headers */
3422 	rx_ctx.showiv = 0;
3423 	/* set the prefena field to 1 because the manual says to */
3424 	rx_ctx.prefena = 1;
3425 
3426 	/* clear the context in the HMC */
3427 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3428 	if (err) {
3429 		dev_info(&vsi->back->pdev->dev,
3430 			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3431 			 ring->queue_index, pf_q, err);
3432 		return -ENOMEM;
3433 	}
3434 
3435 	/* set the context in the HMC */
3436 	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3437 	if (err) {
3438 		dev_info(&vsi->back->pdev->dev,
3439 			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3440 			 ring->queue_index, pf_q, err);
3441 		return -ENOMEM;
3442 	}
3443 
3444 	/* configure Rx buffer alignment */
3445 	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3446 		clear_ring_build_skb_enabled(ring);
3447 	else
3448 		set_ring_build_skb_enabled(ring);
3449 
3450 	ring->rx_offset = i40e_rx_offset(ring);
3451 
3452 	/* cache tail for quicker writes, and clear the reg before use */
3453 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3454 	writel(0, ring->tail);
3455 
3456 	if (ring->xsk_pool) {
3457 		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3458 		ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3459 	} else {
3460 		ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3461 	}
3462 	if (!ok) {
		/* Log this in case the user has forgotten to give the kernel
		 * any buffers; the application may still provide them later.
		 */
3466 		dev_info(&vsi->back->pdev->dev,
3467 			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3468 			 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3469 			 ring->queue_index, pf_q);
3470 	}
3471 
3472 	return 0;
3473 }
3474 
3475 /**
3476  * i40e_vsi_configure_tx - Configure the VSI for Tx
3477  * @vsi: VSI structure describing this set of rings and resources
3478  *
3479  * Configure the Tx VSI for operation.
3480  **/
3481 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3482 {
3483 	int err = 0;
3484 	u16 i;
3485 
3486 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3487 		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3488 
3489 	if (err || !i40e_enabled_xdp_vsi(vsi))
3490 		return err;
3491 
3492 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3493 		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3494 
3495 	return err;
3496 }
3497 
3498 /**
3499  * i40e_vsi_configure_rx - Configure the VSI for Rx
3500  * @vsi: the VSI being configured
3501  *
3502  * Configure the Rx VSI for operation.
3503  **/
3504 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3505 {
3506 	int err = 0;
3507 	u16 i;
3508 
3509 	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3510 		vsi->max_frame = I40E_MAX_RXBUFFER;
3511 		vsi->rx_buf_len = I40E_RXBUFFER_2048;
3512 #if (PAGE_SIZE < 8192)
3513 	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3514 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3515 		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3516 		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3517 #endif
3518 	} else {
3519 		vsi->max_frame = I40E_MAX_RXBUFFER;
3520 		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3521 						       I40E_RXBUFFER_2048;
3522 	}
3523 
3524 	/* set up individual rings */
3525 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3526 		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3527 
3528 	return err;
3529 }
3530 
3531 /**
3532  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3533  * @vsi: ptr to the VSI
3534  **/
3535 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3536 {
3537 	struct i40e_ring *tx_ring, *rx_ring;
3538 	u16 qoffset, qcount;
3539 	int i, n;
3540 
3541 	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3542 		/* Reset the TC information */
3543 		for (i = 0; i < vsi->num_queue_pairs; i++) {
3544 			rx_ring = vsi->rx_rings[i];
3545 			tx_ring = vsi->tx_rings[i];
3546 			rx_ring->dcb_tc = 0;
3547 			tx_ring->dcb_tc = 0;
3548 		}
3549 		return;
3550 	}
3551 
3552 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3553 		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3554 			continue;
3555 
3556 		qoffset = vsi->tc_config.tc_info[n].qoffset;
3557 		qcount = vsi->tc_config.tc_info[n].qcount;
3558 		for (i = qoffset; i < (qoffset + qcount); i++) {
3559 			rx_ring = vsi->rx_rings[i];
3560 			tx_ring = vsi->tx_rings[i];
3561 			rx_ring->dcb_tc = n;
3562 			tx_ring->dcb_tc = n;
3563 		}
3564 	}
3565 }
3566 
3567 /**
3568  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3569  * @vsi: ptr to the VSI
3570  **/
3571 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3572 {
3573 	if (vsi->netdev)
3574 		i40e_set_rx_mode(vsi->netdev);
3575 }
3576 
3577 /**
3578  * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3579  * @pf: Pointer to the targeted PF
3580  *
3581  * Set all flow director counters to 0.
3582  */
3583 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3584 {
3585 	pf->fd_tcp4_filter_cnt = 0;
3586 	pf->fd_udp4_filter_cnt = 0;
3587 	pf->fd_sctp4_filter_cnt = 0;
3588 	pf->fd_ip4_filter_cnt = 0;
3589 	pf->fd_tcp6_filter_cnt = 0;
3590 	pf->fd_udp6_filter_cnt = 0;
3591 	pf->fd_sctp6_filter_cnt = 0;
3592 	pf->fd_ip6_filter_cnt = 0;
3593 }
3594 
3595 /**
3596  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3597  * @vsi: Pointer to the targeted VSI
3598  *
 * This function replays, on the hardware, the hlist in which all the
 * sideband Flow Director filters were saved.
3601  **/
3602 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3603 {
3604 	struct i40e_fdir_filter *filter;
3605 	struct i40e_pf *pf = vsi->back;
3606 	struct hlist_node *node;
3607 
3608 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3609 		return;
3610 
3611 	/* Reset FDir counters as we're replaying all existing filters */
3612 	i40e_reset_fdir_filter_cnt(pf);
3613 
3614 	hlist_for_each_entry_safe(filter, node,
3615 				  &pf->fdir_filter_list, fdir_node) {
3616 		i40e_add_del_fdir(vsi, filter, true);
3617 	}
3618 }
3619 
3620 /**
3621  * i40e_vsi_configure - Set up the VSI for action
3622  * @vsi: the VSI being configured
3623  **/
3624 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3625 {
3626 	int err;
3627 
3628 	i40e_set_vsi_rx_mode(vsi);
3629 	i40e_restore_vlan(vsi);
3630 	i40e_vsi_config_dcb_rings(vsi);
3631 	err = i40e_vsi_configure_tx(vsi);
3632 	if (!err)
3633 		err = i40e_vsi_configure_rx(vsi);
3634 
3635 	return err;
3636 }
3637 
3638 /**
3639  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3640  * @vsi: the VSI being configured
3641  **/
3642 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3643 {
3644 	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3645 	struct i40e_pf *pf = vsi->back;
3646 	struct i40e_hw *hw = &pf->hw;
3647 	u16 vector;
3648 	int i, q;
3649 	u32 qp;
3650 
3651 	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
3652 	 * and PFINT_LNKLSTn registers, e.g.:
3653 	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
3654 	 */
3655 	qp = vsi->base_queue;
3656 	vector = vsi->base_vector;
3657 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3658 		struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3659 
3660 		q_vector->rx.next_update = jiffies + 1;
3661 		q_vector->rx.target_itr =
3662 			ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3663 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3664 		     q_vector->rx.target_itr >> 1);
3665 		q_vector->rx.current_itr = q_vector->rx.target_itr;
3666 
3667 		q_vector->tx.next_update = jiffies + 1;
3668 		q_vector->tx.target_itr =
3669 			ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3670 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3671 		     q_vector->tx.target_itr >> 1);
3672 		q_vector->tx.current_itr = q_vector->tx.target_itr;
3673 
3674 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
3675 		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3676 
3677 		/* Linked list for the queuepairs assigned to this vector */
3678 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
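		/* The writes below build this vector's cause chain, which
		 * ends up as (illustrative):
		 *   Rx(qp) -> [XDP Tx(qp + alloc_queue_pairs) ->] Tx(qp) ->
		 *   Rx(qp + 1) -> ... -> END_OF_LIST
		 */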
3679 		for (q = 0; q < q_vector->num_ringpairs; q++) {
3680 			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3681 			u32 val;
3682 
3683 			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3684 			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3685 			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3686 			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3687 			      (I40E_QUEUE_TYPE_TX <<
3688 			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3689 
3690 			wr32(hw, I40E_QINT_RQCTL(qp), val);
3691 
3692 			if (has_xdp) {
3693 				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3694 				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3695 				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3696 				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3697 				      (I40E_QUEUE_TYPE_TX <<
3698 				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3699 
3700 				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3701 			}
3702 
3703 			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3704 			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3705 			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3706 			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3707 			      (I40E_QUEUE_TYPE_RX <<
3708 			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3709 
3710 			/* Terminate the linked list */
3711 			if (q == (q_vector->num_ringpairs - 1))
3712 				val |= (I40E_QUEUE_END_OF_LIST <<
3713 					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3714 
3715 			wr32(hw, I40E_QINT_TQCTL(qp), val);
3716 			qp++;
3717 		}
3718 	}
3719 
3720 	i40e_flush(hw);
3721 }
3722 
3723 /**
3724  * i40e_enable_misc_int_causes - enable the non-queue interrupts
3725  * @pf: pointer to private device data structure
3726  **/
3727 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3728 {
3729 	struct i40e_hw *hw = &pf->hw;
3730 	u32 val;
3731 
3732 	/* clear things first */
3733 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
3734 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
3735 
3736 	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
3737 	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
3738 	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
3739 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3740 	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
3741 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
3742 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
3743 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3744 
3745 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3746 		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3747 
3748 	if (pf->flags & I40E_FLAG_PTP)
3749 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3750 
3751 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
3752 
3753 	/* SW_ITR_IDX = 0, but don't change INTENA */
3754 	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3755 					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3756 
3757 	/* OTHER_ITR_IDX = 0 */
3758 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3759 }
3760 
3761 /**
3762  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3763  * @vsi: the VSI being configured
3764  **/
3765 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3766 {
3767 	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3768 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3769 	struct i40e_pf *pf = vsi->back;
3770 	struct i40e_hw *hw = &pf->hw;
3771 	u32 val;
3772 
3773 	/* set the ITR configuration */
3774 	q_vector->rx.next_update = jiffies + 1;
3775 	q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3776 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3777 	q_vector->rx.current_itr = q_vector->rx.target_itr;
3778 	q_vector->tx.next_update = jiffies + 1;
3779 	q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3780 	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3781 	q_vector->tx.current_itr = q_vector->tx.target_itr;
3782 
3783 	i40e_enable_misc_int_causes(pf);
3784 
3785 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3786 	wr32(hw, I40E_PFINT_LNKLST0, 0);
3787 
3788 	/* Associate the queue pair to the vector and enable the queue int */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		       |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
	      (nextqp	   << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3793 
3794 	wr32(hw, I40E_QINT_RQCTL(0), val);
3795 
3796 	if (i40e_enabled_xdp_vsi(vsi)) {
3797 		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		     |
3798 		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3799 		      (I40E_QUEUE_TYPE_TX
3800 		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3801 
3802 		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3803 	}
3804 
3805 	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
3806 	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3807 	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3808 
3809 	wr32(hw, I40E_QINT_TQCTL(0), val);
3810 	i40e_flush(hw);
3811 }
3812 
3813 /**
3814  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3815  * @pf: board private structure
3816  **/
3817 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3818 {
3819 	struct i40e_hw *hw = &pf->hw;
3820 
3821 	wr32(hw, I40E_PFINT_DYN_CTL0,
3822 	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3823 	i40e_flush(hw);
3824 }
3825 
3826 /**
3827  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3828  * @pf: board private structure
3829  **/
3830 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3831 {
3832 	struct i40e_hw *hw = &pf->hw;
3833 	u32 val;
3834 
3835 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3836 	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3837 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3838 
3839 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
3840 	i40e_flush(hw);
3841 }
3842 
3843 /**
3844  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3845  * @irq: interrupt number
3846  * @data: pointer to a q_vector
3847  **/
3848 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3849 {
3850 	struct i40e_q_vector *q_vector = data;
3851 
3852 	if (!q_vector->tx.ring && !q_vector->rx.ring)
3853 		return IRQ_HANDLED;
3854 
3855 	napi_schedule_irqoff(&q_vector->napi);
3856 
3857 	return IRQ_HANDLED;
3858 }
3859 
3860 /**
3861  * i40e_irq_affinity_notify - Callback for affinity changes
3862  * @notify: context as to what irq was changed
3863  * @mask: the new affinity mask
3864  *
3865  * This is a callback function used by the irq_set_affinity_notifier function
3866  * so that we may register to receive changes to the irq affinity masks.
3867  **/
3868 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3869 				     const cpumask_t *mask)
3870 {
3871 	struct i40e_q_vector *q_vector =
3872 		container_of(notify, struct i40e_q_vector, affinity_notify);
3873 
3874 	cpumask_copy(&q_vector->affinity_mask, mask);
3875 }
3876 
3877 /**
3878  * i40e_irq_affinity_release - Callback for affinity notifier release
3879  * @ref: internal core kernel usage
3880  *
3881  * This is a callback function used by the irq_set_affinity_notifier function
3882  * to inform the current notification subscriber that they will no longer
3883  * receive notifications.
3884  **/
3885 static void i40e_irq_affinity_release(struct kref *ref) {}
3886 
3887 /**
3888  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3889  * @vsi: the VSI being configured
3890  * @basename: name for the vector
3891  *
3892  * Allocates MSI-X vectors and requests interrupts from the kernel.
3893  **/
3894 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3895 {
3896 	int q_vectors = vsi->num_q_vectors;
3897 	struct i40e_pf *pf = vsi->back;
3898 	int base = vsi->base_vector;
3899 	int rx_int_idx = 0;
3900 	int tx_int_idx = 0;
3901 	int vector, err;
3902 	int irq_num;
3903 	int cpu;
3904 
3905 	for (vector = 0; vector < q_vectors; vector++) {
3906 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3907 
3908 		irq_num = pf->msix_entries[base + vector].vector;
3909 
3910 		if (q_vector->tx.ring && q_vector->rx.ring) {
3911 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3912 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3913 			tx_int_idx++;
3914 		} else if (q_vector->rx.ring) {
3915 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3916 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
3917 		} else if (q_vector->tx.ring) {
3918 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3919 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
3920 		} else {
3921 			/* skip this unused q_vector */
3922 			continue;
3923 		}
3924 		err = request_irq(irq_num,
3925 				  vsi->irq_handler,
3926 				  0,
3927 				  q_vector->name,
3928 				  q_vector);
3929 		if (err) {
3930 			dev_info(&pf->pdev->dev,
3931 				 "MSIX request_irq failed, error: %d\n", err);
3932 			goto free_queue_irqs;
3933 		}
3934 
3935 		/* register for affinity change notifications */
3936 		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3937 		q_vector->affinity_notify.release = i40e_irq_affinity_release;
3938 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3939 		/* Spread affinity hints out across online CPUs.
3940 		 *
3941 		 * get_cpu_mask returns a static constant mask with
3942 		 * a permanent lifetime so it's ok to pass to
3943 		 * irq_update_affinity_hint without making a copy.
3944 		 */
3945 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
3946 		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
3947 	}
3948 
3949 	vsi->irqs_ready = true;
3950 	return 0;
3951 
3952 free_queue_irqs:
3953 	while (vector) {
3954 		vector--;
3955 		irq_num = pf->msix_entries[base + vector].vector;
3956 		irq_set_affinity_notifier(irq_num, NULL);
3957 		irq_update_affinity_hint(irq_num, NULL);
3958 		free_irq(irq_num, &vsi->q_vectors[vector]);
3959 	}
3960 	return err;
3961 }
3962 
3963 /**
3964  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3965  * @vsi: the VSI being un-configured
3966  **/
3967 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3968 {
3969 	struct i40e_pf *pf = vsi->back;
3970 	struct i40e_hw *hw = &pf->hw;
3971 	int base = vsi->base_vector;
3972 	int i;
3973 
3974 	/* disable interrupt causation from each queue */
3975 	for (i = 0; i < vsi->num_queue_pairs; i++) {
3976 		u32 val;
3977 
3978 		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3979 		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3980 		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3981 
3982 		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3983 		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3984 		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3985 
3986 		if (!i40e_enabled_xdp_vsi(vsi))
3987 			continue;
3988 		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3989 	}
3990 
3991 	/* disable each interrupt */
3992 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3993 		for (i = vsi->base_vector;
3994 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3995 			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3996 
3997 		i40e_flush(hw);
3998 		for (i = 0; i < vsi->num_q_vectors; i++)
3999 			synchronize_irq(pf->msix_entries[i + base].vector);
4000 	} else {
4001 		/* Legacy and MSI mode - this stops all interrupt handling */
4002 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
4003 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
4004 		i40e_flush(hw);
4005 		synchronize_irq(pf->pdev->irq);
4006 	}
4007 }
4008 
4009 /**
4010  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4011  * @vsi: the VSI being configured
4012  **/
4013 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4014 {
4015 	struct i40e_pf *pf = vsi->back;
4016 	int i;
4017 
4018 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4019 		for (i = 0; i < vsi->num_q_vectors; i++)
4020 			i40e_irq_dynamic_enable(vsi, i);
4021 	} else {
4022 		i40e_irq_dynamic_enable_icr0(pf);
4023 	}
4024 
4025 	i40e_flush(&pf->hw);
4026 	return 0;
4027 }
4028 
4029 /**
4030  * i40e_free_misc_vector - Free the vector that handles non-queue events
4031  * @pf: board private structure
4032  **/
4033 static void i40e_free_misc_vector(struct i40e_pf *pf)
4034 {
4035 	/* Disable ICR 0 */
4036 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4037 	i40e_flush(&pf->hw);
4038 
4039 	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4040 		synchronize_irq(pf->msix_entries[0].vector);
4041 		free_irq(pf->msix_entries[0].vector, pf);
4042 		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4043 	}
4044 }
4045 
4046 /**
4047  * i40e_intr - MSI/Legacy and non-queue interrupt handler
4048  * @irq: interrupt number
4049  * @data: pointer to a q_vector
4050  *
4051  * This is the handler used for all MSI/Legacy interrupts, and deals
4052  * with both queue and non-queue interrupts.  This is also used in
4053  * MSIX mode to handle the non-queue interrupts.
4054  **/
4055 static irqreturn_t i40e_intr(int irq, void *data)
4056 {
4057 	struct i40e_pf *pf = (struct i40e_pf *)data;
4058 	struct i40e_hw *hw = &pf->hw;
4059 	irqreturn_t ret = IRQ_NONE;
4060 	u32 icr0, icr0_remaining;
4061 	u32 val, ena_mask;
4062 
4063 	icr0 = rd32(hw, I40E_PFINT_ICR0);
4064 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4065 
4066 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
4067 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4068 		goto enable_intr;
4069 
4070 	/* if interrupt but no bits showing, must be SWINT */
4071 	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4072 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4073 		pf->sw_int_count++;
4074 
4075 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
4076 	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4077 		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4078 		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4079 		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4080 	}
4081 
4082 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4083 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4084 		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4085 		struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4086 
		/* We do not have a way to disarm Queue causes while leaving
		 * the interrupt enabled for all other causes. Ideally the
		 * interrupt should be disabled while we are in NAPI, but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
4093 		if (!test_bit(__I40E_DOWN, pf->state))
4094 			napi_schedule_irqoff(&q_vector->napi);
4095 	}
4096 
4097 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4098 		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4099 		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4100 		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4101 	}
4102 
4103 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4104 		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4105 		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4106 	}
4107 
4108 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4109 		/* disable any further VFLR event notifications */
4110 		if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4111 			u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4112 
4113 			reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4114 			wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4115 		} else {
4116 			ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4117 			set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4118 		}
4119 	}
4120 
4121 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4122 		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4123 			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4124 		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4125 		val = rd32(hw, I40E_GLGEN_RSTAT);
4126 		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4127 		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4128 		if (val == I40E_RESET_CORER) {
4129 			pf->corer_count++;
4130 		} else if (val == I40E_RESET_GLOBR) {
4131 			pf->globr_count++;
4132 		} else if (val == I40E_RESET_EMPR) {
4133 			pf->empr_count++;
4134 			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4135 		}
4136 	}
4137 
4138 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4139 		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4140 		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4141 		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4142 			 rd32(hw, I40E_PFHMC_ERRORINFO),
4143 			 rd32(hw, I40E_PFHMC_ERRORDATA));
4144 	}
4145 
4146 	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4147 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4148 
4149 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4150 			schedule_work(&pf->ptp_extts0_work);
4151 
4152 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4153 			i40e_ptp_tx_hwtstamp(pf);
4154 
4155 		icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4156 	}
4157 
4158 	/* If a critical error is pending we have no choice but to reset the
4159 	 * device.
4160 	 * Report and mask out any remaining unexpected interrupts.
4161 	 */
4162 	icr0_remaining = icr0 & ena_mask;
4163 	if (icr0_remaining) {
4164 		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4165 			 icr0_remaining);
4166 		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4167 		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4168 		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4169 			dev_info(&pf->pdev->dev, "device will be reset\n");
4170 			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4171 			i40e_service_event_schedule(pf);
4172 		}
4173 		ena_mask &= ~icr0_remaining;
4174 	}
4175 	ret = IRQ_HANDLED;
4176 
4177 enable_intr:
4178 	/* re-enable interrupt causes */
4179 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4180 	if (!test_bit(__I40E_DOWN, pf->state) ||
4181 	    test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4182 		i40e_service_event_schedule(pf);
4183 		i40e_irq_dynamic_enable_icr0(pf);
4184 	}
4185 
4186 	return ret;
4187 }
4188 
4189 /**
4190  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4191  * @tx_ring:  tx ring to clean
4192  * @budget:   how many cleans we're allowed
4193  *
 * Returns true if there's any budget left (i.e. the clean is finished)
4195  **/
4196 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4197 {
4198 	struct i40e_vsi *vsi = tx_ring->vsi;
4199 	u16 i = tx_ring->next_to_clean;
4200 	struct i40e_tx_buffer *tx_buf;
4201 	struct i40e_tx_desc *tx_desc;
4202 
4203 	tx_buf = &tx_ring->tx_bi[i];
4204 	tx_desc = I40E_TX_DESC(tx_ring, i);
4205 	i -= tx_ring->count;
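	/* i is now biased by -count, so the ring-wrap checks below reduce
	 * to a simple test for i reaching zero.
	 */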
4206 
4207 	do {
4208 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4209 
4210 		/* if next_to_watch is not set then there is no work pending */
4211 		if (!eop_desc)
4212 			break;
4213 
4214 		/* prevent any other reads prior to eop_desc */
4215 		smp_rmb();
4216 
4217 		/* if the descriptor isn't done, no work yet to do */
4218 		if (!(eop_desc->cmd_type_offset_bsz &
4219 		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4220 			break;
4221 
4222 		/* clear next_to_watch to prevent false hangs */
4223 		tx_buf->next_to_watch = NULL;
4224 
4225 		tx_desc->buffer_addr = 0;
4226 		tx_desc->cmd_type_offset_bsz = 0;
4227 		/* move past filter desc */
4228 		tx_buf++;
4229 		tx_desc++;
4230 		i++;
4231 		if (unlikely(!i)) {
4232 			i -= tx_ring->count;
4233 			tx_buf = tx_ring->tx_bi;
4234 			tx_desc = I40E_TX_DESC(tx_ring, 0);
4235 		}
4236 		/* unmap skb header data */
4237 		dma_unmap_single(tx_ring->dev,
4238 				 dma_unmap_addr(tx_buf, dma),
4239 				 dma_unmap_len(tx_buf, len),
4240 				 DMA_TO_DEVICE);
4241 		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4242 			kfree(tx_buf->raw_buf);
4243 
4244 		tx_buf->raw_buf = NULL;
4245 		tx_buf->tx_flags = 0;
4246 		tx_buf->next_to_watch = NULL;
4247 		dma_unmap_len_set(tx_buf, len, 0);
4248 		tx_desc->buffer_addr = 0;
4249 		tx_desc->cmd_type_offset_bsz = 0;
4250 
4251 		/* move us past the eop_desc for start of next FD desc */
4252 		tx_buf++;
4253 		tx_desc++;
4254 		i++;
4255 		if (unlikely(!i)) {
4256 			i -= tx_ring->count;
4257 			tx_buf = tx_ring->tx_bi;
4258 			tx_desc = I40E_TX_DESC(tx_ring, 0);
4259 		}
4260 
4261 		/* update budget accounting */
4262 		budget--;
4263 	} while (likely(budget));
4264 
4265 	i += tx_ring->count;
4266 	tx_ring->next_to_clean = i;
4267 
4268 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4269 		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4270 
4271 	return budget > 0;
4272 }
4273 
4274 /**
4275  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4276  * @irq: interrupt number
4277  * @data: pointer to a q_vector
4278  **/
4279 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4280 {
4281 	struct i40e_q_vector *q_vector = data;
4282 	struct i40e_vsi *vsi;
4283 
4284 	if (!q_vector->tx.ring)
4285 		return IRQ_HANDLED;
4286 
4287 	vsi = q_vector->tx.ring->vsi;
4288 	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4289 
4290 	return IRQ_HANDLED;
4291 }
4292 
4293 /**
4294  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4295  * @vsi: the VSI being configured
4296  * @v_idx: vector index
4297  * @qp_idx: queue pair index
4298  **/
4299 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4300 {
4301 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4302 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4303 	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4304 
4305 	tx_ring->q_vector = q_vector;
4306 	tx_ring->next = q_vector->tx.ring;
4307 	q_vector->tx.ring = tx_ring;
4308 	q_vector->tx.count++;
4309 
4310 	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4311 	if (i40e_enabled_xdp_vsi(vsi)) {
4312 		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4313 
4314 		xdp_ring->q_vector = q_vector;
4315 		xdp_ring->next = q_vector->tx.ring;
4316 		q_vector->tx.ring = xdp_ring;
4317 		q_vector->tx.count++;
4318 	}
4319 
4320 	rx_ring->q_vector = q_vector;
4321 	rx_ring->next = q_vector->rx.ring;
4322 	q_vector->rx.ring = rx_ring;
4323 	q_vector->rx.count++;
4324 }
4325 
4326 /**
4327  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4328  * @vsi: the VSI being configured
4329  *
4330  * This function maps descriptor rings to the queue-specific vectors
4331  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
4332  * one vector per queue pair, but on a constrained vector budget, we
4333  * group the queue pairs as "efficiently" as possible.
4334  **/
4335 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4336 {
4337 	int qp_remaining = vsi->num_queue_pairs;
4338 	int q_vectors = vsi->num_q_vectors;
4339 	int num_ringpairs;
4340 	int v_start = 0;
4341 	int qp_idx = 0;
4342 
4343 	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4344 	 * group them so there are multiple queues per vector.
	 * It is also important to go through all of the available vectors so
	 * that any vectors left unused are cleared. This is especially
	 * important when decreasing the number of queues in use.
4349 	 */
4350 	for (; v_start < q_vectors; v_start++) {
4351 		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4352 
4353 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4354 
4355 		q_vector->num_ringpairs = num_ringpairs;
4356 		q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4357 
4358 		q_vector->rx.count = 0;
4359 		q_vector->tx.count = 0;
4360 		q_vector->rx.ring = NULL;
4361 		q_vector->tx.ring = NULL;
4362 
4363 		while (num_ringpairs--) {
4364 			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4365 			qp_idx++;
4366 			qp_remaining--;
4367 		}
4368 	}
4369 }
4370 
4371 /**
4372  * i40e_vsi_request_irq - Request IRQ from the OS
4373  * @vsi: the VSI being configured
4374  * @basename: name for the vector
4375  **/
4376 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4377 {
4378 	struct i40e_pf *pf = vsi->back;
4379 	int err;
4380 
4381 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4382 		err = i40e_vsi_request_irq_msix(vsi, basename);
4383 	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4384 		err = request_irq(pf->pdev->irq, i40e_intr, 0,
4385 				  pf->int_name, pf);
4386 	else
4387 		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4388 				  pf->int_name, pf);
4389 
4390 	if (err)
4391 		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4392 
4393 	return err;
4394 }
4395 
4396 #ifdef CONFIG_NET_POLL_CONTROLLER
4397 /**
4398  * i40e_netpoll - A Polling 'interrupt' handler
4399  * @netdev: network interface device structure
4400  *
4401  * This is used by netconsole to send skbs without having to re-enable
4402  * interrupts.  It's not called while the normal interrupt routine is executing.
4403  **/
4404 static void i40e_netpoll(struct net_device *netdev)
4405 {
4406 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4407 	struct i40e_vsi *vsi = np->vsi;
4408 	struct i40e_pf *pf = vsi->back;
4409 	int i;
4410 
4411 	/* if interface is down do nothing */
4412 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
4413 		return;
4414 
4415 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4416 		for (i = 0; i < vsi->num_q_vectors; i++)
4417 			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* i40e_intr() treats its cookie as the PF, matching the
		 * dev_id passed to request_irq() for the legacy/MSI case.
		 */
		i40e_intr(pf->pdev->irq, pf);
	}
4421 }
4422 #endif
4423 
4424 #define I40E_QTX_ENA_WAIT_COUNT 50
4425 
4426 /**
4427  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4428  * @pf: the PF being configured
4429  * @pf_q: the PF queue
4430  * @enable: enable or disable state of the queue
4431  *
4432  * This routine will wait for the given Tx queue of the PF to reach the
4433  * enabled or disabled state.
4434  * Returns -ETIMEDOUT in case of failing to reach the requested state after
4435  * multiple retries; else will return 0 in case of success.
4436  **/
4437 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4438 {
4439 	int i;
4440 	u32 tx_reg;
4441 
4442 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4443 		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4444 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4445 			break;
4446 
4447 		usleep_range(10, 20);
4448 	}
4449 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4450 		return -ETIMEDOUT;
4451 
4452 	return 0;
4453 }
4454 
4455 /**
4456  * i40e_control_tx_q - Start or stop a particular Tx queue
4457  * @pf: the PF structure
4458  * @pf_q: the PF queue to configure
4459  * @enable: start or stop the queue
4460  *
4461  * This function enables or disables a single queue. Note that any delay
4462  * required after the operation is expected to be handled by the caller of
4463  * this function.
4464  **/
4465 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4466 {
4467 	struct i40e_hw *hw = &pf->hw;
4468 	u32 tx_reg;
4469 	int i;
4470 
4471 	/* warn the TX unit of coming changes */
4472 	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4473 	if (!enable)
4474 		usleep_range(10, 20);
4475 
4476 	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4477 		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4478 		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4479 		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4480 			break;
4481 		usleep_range(1000, 2000);
4482 	}
4483 
4484 	/* Skip if the queue is already in the requested state */
4485 	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4486 		return;
4487 
4488 	/* turn on/off the queue */
4489 	if (enable) {
4490 		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4491 		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4492 	} else {
4493 		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4494 	}
4495 
4496 	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4497 }
4498 
4499 /**
4500  * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4501  * @seid: VSI SEID
4502  * @pf: the PF structure
4503  * @pf_q: the PF queue to configure
4504  * @is_xdp: true if the queue is used for XDP
4505  * @enable: start or stop the queue
4506  **/
4507 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4508 			   bool is_xdp, bool enable)
4509 {
4510 	int ret;
4511 
4512 	i40e_control_tx_q(pf, pf_q, enable);
4513 
4514 	/* wait for the change to finish */
4515 	ret = i40e_pf_txq_wait(pf, pf_q, enable);
4516 	if (ret) {
4517 		dev_info(&pf->pdev->dev,
4518 			 "VSI seid %d %sTx ring %d %sable timeout\n",
4519 			 seid, (is_xdp ? "XDP " : ""), pf_q,
4520 			 (enable ? "en" : "dis"));
4521 	}
4522 
4523 	return ret;
4524 }
4525 
4526 /**
4527  * i40e_vsi_enable_tx - Start a VSI's rings
4528  * @vsi: the VSI being configured
4529  **/
4530 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4531 {
4532 	struct i40e_pf *pf = vsi->back;
4533 	int i, pf_q, ret = 0;
4534 
4535 	pf_q = vsi->base_queue;
4536 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4537 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
4538 					     pf_q,
4539 					     false /*is xdp*/, true);
4540 		if (ret)
4541 			break;
4542 
4543 		if (!i40e_enabled_xdp_vsi(vsi))
4544 			continue;
4545 
4546 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
4547 					     pf_q + vsi->alloc_queue_pairs,
4548 					     true /*is xdp*/, true);
4549 		if (ret)
4550 			break;
4551 	}
4552 	return ret;
4553 }
4554 
4555 /**
4556  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4557  * @pf: the PF being configured
4558  * @pf_q: the PF queue
4559  * @enable: enable or disable state of the queue
4560  *
4561  * This routine will wait for the given Rx queue of the PF to reach the
4562  * enabled or disabled state.
4563  * Returns -ETIMEDOUT in case of failing to reach the requested state after
4564  * multiple retries; else will return 0 in case of success.
4565  **/
4566 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4567 {
4568 	int i;
4569 	u32 rx_reg;
4570 
4571 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4572 		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4573 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4574 			break;
4575 
4576 		usleep_range(10, 20);
4577 	}
4578 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4579 		return -ETIMEDOUT;
4580 
4581 	return 0;
4582 }
4583 
4584 /**
4585  * i40e_control_rx_q - Start or stop a particular Rx queue
4586  * @pf: the PF structure
4587  * @pf_q: the PF queue to configure
4588  * @enable: start or stop the queue
4589  *
4590  * This function enables or disables a single queue. Note that
4591  * any delay required after the operation is expected to be
4592  * handled by the caller of this function.
4593  **/
4594 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4595 {
4596 	struct i40e_hw *hw = &pf->hw;
4597 	u32 rx_reg;
4598 	int i;
4599 
4600 	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4601 		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4602 		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4603 		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4604 			break;
4605 		usleep_range(1000, 2000);
4606 	}
4607 
4608 	/* Skip if the queue is already in the requested state */
4609 	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4610 		return;
4611 
4612 	/* turn on/off the queue */
4613 	if (enable)
4614 		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4615 	else
4616 		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4617 
4618 	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4619 }
4620 
4621 /**
 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4623  * @pf: the PF structure
4624  * @pf_q: queue being configured
4625  * @enable: start or stop the rings
4626  *
4627  * This function enables or disables a single queue along with waiting
4628  * for the change to finish. The caller of this function should handle
4629  * the delays needed in the case of disabling queues.
4630  **/
4631 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4632 {
	i40e_control_rx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	return i40e_pf_rxq_wait(pf, pf_q, enable);
4643 }
4644 
4645 /**
4646  * i40e_vsi_enable_rx - Start a VSI's rings
4647  * @vsi: the VSI being configured
4648  **/
4649 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4650 {
4651 	struct i40e_pf *pf = vsi->back;
4652 	int i, pf_q, ret = 0;
4653 
4654 	pf_q = vsi->base_queue;
4655 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4656 		ret = i40e_control_wait_rx_q(pf, pf_q, true);
4657 		if (ret) {
4658 			dev_info(&pf->pdev->dev,
4659 				 "VSI seid %d Rx ring %d enable timeout\n",
4660 				 vsi->seid, pf_q);
4661 			break;
4662 		}
4663 	}
4664 
4665 	return ret;
4666 }
4667 
4668 /**
4669  * i40e_vsi_start_rings - Start a VSI's rings
4670  * @vsi: the VSI being configured
4671  **/
4672 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4673 {
4674 	int ret = 0;
4675 
4676 	/* do rx first for enable and last for disable */
4677 	ret = i40e_vsi_enable_rx(vsi);
4678 	if (ret)
4679 		return ret;
4680 	ret = i40e_vsi_enable_tx(vsi);
4681 
4682 	return ret;
4683 }
4684 
4685 #define I40E_DISABLE_TX_GAP_MSEC	50
4686 
4687 /**
4688  * i40e_vsi_stop_rings - Stop a VSI's rings
4689  * @vsi: the VSI being configured
4690  **/
4691 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4692 {
4693 	struct i40e_pf *pf = vsi->back;
4694 	int pf_q, err, q_end;
4695 
4696 	/* When port TX is suspended, don't wait */
4697 	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4698 		return i40e_vsi_stop_rings_no_wait(vsi);
4699 
4700 	q_end = vsi->base_queue + vsi->num_queue_pairs;
4701 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4702 		i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4703 
4704 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4705 		err = i40e_control_wait_rx_q(pf, pf_q, false);
4706 		if (err)
4707 			dev_info(&pf->pdev->dev,
4708 				 "VSI seid %d Rx ring %d disable timeout\n",
4709 				 vsi->seid, pf_q);
4710 	}
4711 
4712 	msleep(I40E_DISABLE_TX_GAP_MSEC);
4714 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4715 		wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4716 
4717 	i40e_vsi_wait_queues_disabled(vsi);
4718 }
4719 
4720 /**
4721  * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4722  * @vsi: the VSI being shutdown
4723  *
4724  * This function stops all the rings for a VSI but does not delay to verify
4725  * that rings have been disabled. It is expected that the caller is shutting
4726  * down multiple VSIs at once and will delay together for all the VSIs after
4727  * initiating the shutdown. This is particularly useful for shutting down lots
4728  * of VFs together. Otherwise, a large delay can be incurred while configuring
4729  * each VSI in serial.
4730  **/
4731 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4732 {
4733 	struct i40e_pf *pf = vsi->back;
4734 	int i, pf_q;
4735 
4736 	pf_q = vsi->base_queue;
4737 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4738 		i40e_control_tx_q(pf, pf_q, false);
4739 		i40e_control_rx_q(pf, pf_q, false);
4740 	}
4741 }
4742 
4743 /**
4744  * i40e_vsi_free_irq - Free the irq association with the OS
4745  * @vsi: the VSI being configured
4746  **/
4747 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4748 {
4749 	struct i40e_pf *pf = vsi->back;
4750 	struct i40e_hw *hw = &pf->hw;
4751 	int base = vsi->base_vector;
4752 	u32 val, qp;
4753 	int i;
4754 
4755 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4756 		if (!vsi->q_vectors)
4757 			return;
4758 
4759 		if (!vsi->irqs_ready)
4760 			return;
4761 
4762 		vsi->irqs_ready = false;
4763 		for (i = 0; i < vsi->num_q_vectors; i++) {
4764 			int irq_num;
4765 			u16 vector;
4766 
4767 			vector = i + base;
4768 			irq_num = pf->msix_entries[vector].vector;
4769 
4770 			/* free only the irqs that were actually requested */
4771 			if (!vsi->q_vectors[i] ||
4772 			    !vsi->q_vectors[i]->num_ringpairs)
4773 				continue;
4774 
4775 			/* clear the affinity notifier in the IRQ descriptor */
4776 			irq_set_affinity_notifier(irq_num, NULL);
4777 			/* remove our suggested affinity mask for this IRQ */
4778 			irq_update_affinity_hint(irq_num, NULL);
4779 			synchronize_irq(irq_num);
4780 			free_irq(irq_num, vsi->q_vectors[i]);
4781 
4782 			/* Tear down the interrupt queue link list
4783 			 *
4784 			 * We know that they come in pairs and always
4785 			 * the Rx first, then the Tx.  To clear the
4786 			 * link list, stick the EOL value into the
4787 			 * next_q field of the registers.
4788 			 */
4789 			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4790 			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4791 				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4792 			val |= I40E_QUEUE_END_OF_LIST
4793 				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4794 			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4795 
4796 			while (qp != I40E_QUEUE_END_OF_LIST) {
4797 				u32 next;
4798 
4799 				val = rd32(hw, I40E_QINT_RQCTL(qp));
4800 
4801 				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4802 					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4803 					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4804 					 I40E_QINT_RQCTL_INTEVENT_MASK);
4805 
4806 				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4807 					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4808 
4809 				wr32(hw, I40E_QINT_RQCTL(qp), val);
4810 
4811 				val = rd32(hw, I40E_QINT_TQCTL(qp));
4812 
4813 				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4814 					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4815 
4816 				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4817 					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4818 					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4819 					 I40E_QINT_TQCTL_INTEVENT_MASK);
4820 
4821 				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4822 					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4823 
4824 				wr32(hw, I40E_QINT_TQCTL(qp), val);
4825 				qp = next;
4826 			}
4827 		}
4828 	} else {
4829 		free_irq(pf->pdev->irq, pf);
4830 
4831 		val = rd32(hw, I40E_PFINT_LNKLST0);
4832 		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4833 			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4834 		val |= I40E_QUEUE_END_OF_LIST
4835 			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4836 		wr32(hw, I40E_PFINT_LNKLST0, val);
4837 
4838 		val = rd32(hw, I40E_QINT_RQCTL(qp));
4839 		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4840 			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4841 			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4842 			 I40E_QINT_RQCTL_INTEVENT_MASK);
4843 
4844 		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4845 			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4846 
4847 		wr32(hw, I40E_QINT_RQCTL(qp), val);
4848 
4849 		val = rd32(hw, I40E_QINT_TQCTL(qp));
4850 
4851 		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4852 			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4853 			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4854 			 I40E_QINT_TQCTL_INTEVENT_MASK);
4855 
4856 		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4857 			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4858 
4859 		wr32(hw, I40E_QINT_TQCTL(qp), val);
4860 	}
4861 }
4862 
4863 /**
4864  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4865  * @vsi: the VSI being configured
4866  * @v_idx: Index of vector to be freed
4867  *
4868  * This function frees the memory allocated to the q_vector.  In addition if
4869  * NAPI is enabled it will delete any references to the NAPI struct prior
4870  * to freeing the q_vector.
4871  **/
4872 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4873 {
4874 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4875 	struct i40e_ring *ring;
4876 
4877 	if (!q_vector)
4878 		return;
4879 
4880 	/* disassociate q_vector from rings */
4881 	i40e_for_each_ring(ring, q_vector->tx)
4882 		ring->q_vector = NULL;
4883 
4884 	i40e_for_each_ring(ring, q_vector->rx)
4885 		ring->q_vector = NULL;
4886 
4887 	/* only VSI w/ an associated netdev is set up w/ NAPI */
4888 	if (vsi->netdev)
4889 		netif_napi_del(&q_vector->napi);
4890 
4891 	vsi->q_vectors[v_idx] = NULL;
4892 
4893 	kfree_rcu(q_vector, rcu);
4894 }
4895 
4896 /**
4897  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4898  * @vsi: the VSI being un-configured
4899  *
4900  * This frees the memory allocated to the q_vectors and
4901  * deletes references to the NAPI struct.
4902  **/
4903 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4904 {
4905 	int v_idx;
4906 
4907 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4908 		i40e_free_q_vector(vsi, v_idx);
4909 }
4910 
4911 /**
4912  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4913  * @pf: board private structure
4914  **/
4915 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4916 {
4917 	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4918 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4919 		pci_disable_msix(pf->pdev);
4920 		kfree(pf->msix_entries);
4921 		pf->msix_entries = NULL;
4922 		kfree(pf->irq_pile);
4923 		pf->irq_pile = NULL;
4924 	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4925 		pci_disable_msi(pf->pdev);
4926 	}
4927 	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4928 }
4929 
4930 /**
4931  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4932  * @pf: board private structure
4933  *
4934  * We go through and clear interrupt specific resources and reset the structure
4935  * to pre-load conditions
4936  **/
4937 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4938 {
4939 	int i;
4940 
4941 	if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
4942 		i40e_free_misc_vector(pf);
4943 
4944 	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4945 		      I40E_IWARP_IRQ_PILE_ID);
4946 
	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT - 1);
4948 	for (i = 0; i < pf->num_alloc_vsi; i++)
4949 		if (pf->vsi[i])
4950 			i40e_vsi_free_q_vectors(pf->vsi[i]);
4951 	i40e_reset_interrupt_capability(pf);
4952 }
4953 
4954 /**
4955  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4956  * @vsi: the VSI being configured
4957  **/
4958 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4959 {
4960 	int q_idx;
4961 
4962 	if (!vsi->netdev)
4963 		return;
4964 
4965 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4966 		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4967 
4968 		if (q_vector->rx.ring || q_vector->tx.ring)
4969 			napi_enable(&q_vector->napi);
4970 	}
4971 }
4972 
4973 /**
4974  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4975  * @vsi: the VSI being configured
4976  **/
4977 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4978 {
4979 	int q_idx;
4980 
4981 	if (!vsi->netdev)
4982 		return;
4983 
4984 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4985 		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4986 
4987 		if (q_vector->rx.ring || q_vector->tx.ring)
4988 			napi_disable(&q_vector->napi);
4989 	}
4990 }
4991 
4992 /**
4993  * i40e_vsi_close - Shut down a VSI
4994  * @vsi: the vsi to be quelled
4995  **/
4996 static void i40e_vsi_close(struct i40e_vsi *vsi)
4997 {
	struct i40e_pf *pf = vsi->back;

	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
5000 		i40e_down(vsi);
5001 	i40e_vsi_free_irq(vsi);
5002 	i40e_vsi_free_tx_resources(vsi);
5003 	i40e_vsi_free_rx_resources(vsi);
5004 	vsi->current_netdev_flags = 0;
5005 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5006 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5007 		set_bit(__I40E_CLIENT_RESET, pf->state);
5008 }
5009 
5010 /**
5011  * i40e_quiesce_vsi - Pause a given VSI
5012  * @vsi: the VSI being paused
5013  **/
5014 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5015 {
5016 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
5017 		return;
5018 
5019 	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
5020 	if (vsi->netdev && netif_running(vsi->netdev))
5021 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5022 	else
5023 		i40e_vsi_close(vsi);
5024 }
5025 
5026 /**
5027  * i40e_unquiesce_vsi - Resume a given VSI
5028  * @vsi: the VSI being resumed
5029  **/
5030 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5031 {
5032 	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5033 		return;
5034 
5035 	if (vsi->netdev && netif_running(vsi->netdev))
5036 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5037 	else
5038 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
5039 }
5040 
5041 /**
5042  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5043  * @pf: the PF
5044  **/
5045 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5046 {
5047 	int v;
5048 
5049 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5050 		if (pf->vsi[v])
5051 			i40e_quiesce_vsi(pf->vsi[v]);
5052 	}
5053 }
5054 
5055 /**
5056  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5057  * @pf: the PF
5058  **/
5059 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5060 {
5061 	int v;
5062 
5063 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5064 		if (pf->vsi[v])
5065 			i40e_unquiesce_vsi(pf->vsi[v]);
5066 	}
5067 }
5068 
5069 /**
5070  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5071  * @vsi: the VSI being configured
5072  *
5073  * Wait until all queues on a given VSI have been disabled.
5074  **/
5075 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5076 {
5077 	struct i40e_pf *pf = vsi->back;
5078 	int i, pf_q, ret;
5079 
5080 	pf_q = vsi->base_queue;
5081 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5082 		/* Check and wait for the Tx queue */
5083 		ret = i40e_pf_txq_wait(pf, pf_q, false);
5084 		if (ret) {
5085 			dev_info(&pf->pdev->dev,
5086 				 "VSI seid %d Tx ring %d disable timeout\n",
5087 				 vsi->seid, pf_q);
5088 			return ret;
5089 		}
5090 
5091 		if (!i40e_enabled_xdp_vsi(vsi))
5092 			goto wait_rx;
5093 
5094 		/* Check and wait for the XDP Tx queue */
5095 		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5096 				       false);
5097 		if (ret) {
5098 			dev_info(&pf->pdev->dev,
5099 				 "VSI seid %d XDP Tx ring %d disable timeout\n",
5100 				 vsi->seid, pf_q);
5101 			return ret;
5102 		}
5103 wait_rx:
5104 		/* Check and wait for the Rx queue */
5105 		ret = i40e_pf_rxq_wait(pf, pf_q, false);
5106 		if (ret) {
5107 			dev_info(&pf->pdev->dev,
5108 				 "VSI seid %d Rx ring %d disable timeout\n",
5109 				 vsi->seid, pf_q);
5110 			return ret;
5111 		}
5112 	}
5113 
5114 	return 0;
5115 }
5116 
5117 #ifdef CONFIG_I40E_DCB
5118 /**
5119  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5120  * @pf: the PF
5121  *
5122  * This function waits for the queues to be in disabled state for all the
5123  * VSIs that are managed by this PF.
5124  **/
5125 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5126 {
5127 	int v, ret = 0;
5128 
5129 	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5130 		if (pf->vsi[v]) {
5131 			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5132 			if (ret)
5133 				break;
5134 		}
5135 	}
5136 
5137 	return ret;
5138 }
5139 
5140 #endif
5141 
5142 /**
5143  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5144  * @pf: pointer to PF
5145  *
 * Get the TC map for an iSCSI-capable PF; it includes the iSCSI TC
 * as well as the LAN TC.
5148  **/
5149 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5150 {
5151 	struct i40e_dcb_app_priority_table app;
5152 	struct i40e_hw *hw = &pf->hw;
5153 	u8 enabled_tc = 1; /* TC0 is always enabled */
5154 	u8 tc, i;
5155 	/* Get the iSCSI APP TLV */
5156 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5157 
5158 	for (i = 0; i < dcbcfg->numapps; i++) {
5159 		app = dcbcfg->app[i];
5160 		if (app.selector == I40E_APP_SEL_TCPIP &&
5161 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
5162 			tc = dcbcfg->etscfg.prioritytable[app.priority];
5163 			enabled_tc |= BIT(tc);
5164 			break;
5165 		}
5166 	}
5167 
5168 	return enabled_tc;
5169 }
5170 
5171 /**
5172  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
5173  * @dcbcfg: the corresponding DCBx configuration structure
5174  *
5175  * Return the number of TCs from given DCBx configuration
5176  **/
5177 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5178 {
5179 	int i, tc_unused = 0;
5180 	u8 num_tc = 0;
5181 	u8 ret = 0;
5182 
5183 	/* Scan the ETS Config Priority Table to find
5184 	 * traffic class enabled for a given priority
5185 	 * and create a bitmask of enabled TCs
5186 	 */
5187 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5188 		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5189 
5190 	/* Now scan the bitmask to check for
5191 	 * contiguous TCs starting with TC0
5192 	 */
5193 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5194 		if (num_tc & BIT(i)) {
5195 			if (!tc_unused) {
5196 				ret++;
5197 			} else {
5198 				pr_err("Non-contiguous TC - Disabling DCB\n");
5199 				return 1;
5200 			}
5201 		} else {
5202 			tc_unused = 1;
5203 		}
5204 	}
5205 
5206 	/* There is always at least TC0 */
5207 	if (!ret)
5208 		ret = 1;
5209 
5210 	return ret;
5211 }
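
/* Illustrative sketch, not part of the driver: shows how an ETS priority
 * table collapses into a TC bitmap and then into a contiguous TC count,
 * mirroring i40e_dcb_get_num_tc() above.  The helper name and the sample
 * table in the comment are hypothetical.
 */
static inline u8 i40e_example_num_tc_from_prio_table(const u8 *prio_table)
{
	u8 bitmap = 0, count = 0;
	int i;

	/* e.g. prio_table = {0, 0, 1, 1, 2, 0, 0, 0} -> bitmap 0x7 -> 3 TCs */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		bitmap |= BIT(prio_table[i]);

	/* count only the contiguous run of TCs starting at TC0 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (!(bitmap & BIT(i)))
			break;
		count++;
	}

	return count ? count : 1; /* TC0 is always present */
}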
5212 
5213 /**
5214  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5215  * @dcbcfg: the corresponding DCBx configuration structure
5216  *
 * Query the given DCBX configuration and return a bitmap of the
 * traffic classes it enables.
5219  **/
5220 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5221 {
5222 	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5223 	u8 enabled_tc = 1;
5224 	u8 i;
5225 
5226 	for (i = 0; i < num_tc; i++)
5227 		enabled_tc |= BIT(i);
5228 
5229 	return enabled_tc;
5230 }
5231 
5232 /**
5233  * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5234  * @pf: PF being queried
5235  *
 * Query the current MQPRIO configuration and return a bitmap of the
 * enabled traffic classes.
5238  **/
5239 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5240 {
5241 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5242 	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5243 	u8 enabled_tc = 1, i;
5244 
5245 	for (i = 1; i < num_tc; i++)
5246 		enabled_tc |= BIT(i);
5247 	return enabled_tc;
5248 }
5249 
5250 /**
 * i40e_pf_get_num_tc - Get the number of traffic classes for the PF
5252  * @pf: PF being queried
5253  *
5254  * Return number of traffic classes enabled for the given PF
5255  **/
5256 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5257 {
5258 	struct i40e_hw *hw = &pf->hw;
5259 	u8 i, enabled_tc = 1;
5260 	u8 num_tc = 0;
5261 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5262 
5263 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5264 		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5265 
5266 	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
5267 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5268 		return 1;
5269 
	/* In SFP mode, all TCs are enabled on the port */
5271 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5272 		return i40e_dcb_get_num_tc(dcbcfg);
5273 
	/* In MFP mode, return the count of TCs enabled for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
5277 	else
5278 		return 1; /* Only TC0 */
5279 
5280 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5281 		if (enabled_tc & BIT(i))
5282 			num_tc++;
5283 	}
5284 	return num_tc;
5285 }
5286 
5287 /**
5288  * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5289  * @pf: PF being queried
5290  *
5291  * Return a bitmap for enabled traffic classes for this PF.
5292  **/
5293 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5294 {
5295 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5296 		return i40e_mqprio_get_enabled_tc(pf);
5297 
5298 	/* If neither MQPRIO nor DCB is enabled for this PF then just return
5299 	 * default TC
5300 	 */
5301 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5302 		return I40E_DEFAULT_TRAFFIC_CLASS;
5303 
	/* In SFP mode we want the PF to be enabled for all TCs */
5305 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5306 		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5307 
5308 	/* MFP enabled and iSCSI PF type */
5309 	if (pf->hw.func_caps.iscsi)
5310 		return i40e_get_iscsi_tc_map(pf);
5311 	else
5312 		return I40E_DEFAULT_TRAFFIC_CLASS;
5313 }
5314 
5315 /**
5316  * i40e_vsi_get_bw_info - Query VSI BW Information
5317  * @vsi: the VSI being queried
5318  *
5319  * Returns 0 on success, negative value on failure
5320  **/
5321 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5322 {
5323 	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5324 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5325 	struct i40e_pf *pf = vsi->back;
5326 	struct i40e_hw *hw = &pf->hw;
5327 	i40e_status ret;
5328 	u32 tc_bw_max;
5329 	int i;
5330 
5331 	/* Get the VSI level BW configuration */
5332 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5333 	if (ret) {
5334 		dev_info(&pf->pdev->dev,
5335 			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5336 			 i40e_stat_str(&pf->hw, ret),
5337 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5338 		return -EINVAL;
5339 	}
5340 
5341 	/* Get the VSI level BW configuration per TC */
5342 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5343 					       NULL);
5344 	if (ret) {
5345 		dev_info(&pf->pdev->dev,
5346 			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5347 			 i40e_stat_str(&pf->hw, ret),
5348 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5349 		return -EINVAL;
5350 	}
5351 
5352 	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5353 		dev_info(&pf->pdev->dev,
5354 			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5355 			 bw_config.tc_valid_bits,
5356 			 bw_ets_config.tc_valid_bits);
5357 		/* Still continuing */
5358 	}
5359 
5360 	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5361 	vsi->bw_max_quanta = bw_config.max_bw;
5362 	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5363 		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5364 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5365 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5366 		vsi->bw_ets_limit_credits[i] =
5367 					le16_to_cpu(bw_ets_config.credits[i]);
5368 		/* 3 bits out of 4 for each TC */
5369 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5370 	}
5371 
5372 	return 0;
5373 }
5374 
5375 /**
5376  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5377  * @vsi: the VSI being configured
5378  * @enabled_tc: TC bitmap
5379  * @bw_share: BW shared credits per TC
5380  *
5381  * Returns 0 on success, negative value on failure
5382  **/
5383 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5384 				       u8 *bw_share)
5385 {
5386 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5387 	struct i40e_pf *pf = vsi->back;
5388 	i40e_status ret;
5389 	int i;
5390 
5391 	/* There is no need to reset BW when mqprio mode is on.  */
5392 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5393 		return 0;
5394 	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5395 		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5396 		if (ret)
5397 			dev_info(&pf->pdev->dev,
5398 				 "Failed to reset tx rate for vsi->seid %u\n",
5399 				 vsi->seid);
5400 		return ret;
5401 	}
5402 	memset(&bw_data, 0, sizeof(bw_data));
5403 	bw_data.tc_valid_bits = enabled_tc;
5404 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5405 		bw_data.tc_bw_credits[i] = bw_share[i];
5406 
5407 	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5408 	if (ret) {
5409 		dev_info(&pf->pdev->dev,
5410 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
5411 			 pf->hw.aq.asq_last_status);
5412 		return -EINVAL;
5413 	}
5414 
5415 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5416 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5417 
5418 	return 0;
5419 }
5420 
5421 /**
5422  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5423  * @vsi: the VSI being configured
5424  * @enabled_tc: TC map to be enabled
5425  *
5426  **/
5427 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5428 {
5429 	struct net_device *netdev = vsi->netdev;
5430 	struct i40e_pf *pf = vsi->back;
5431 	struct i40e_hw *hw = &pf->hw;
5432 	u8 netdev_tc = 0;
5433 	int i;
5434 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5435 
5436 	if (!netdev)
5437 		return;
5438 
5439 	if (!enabled_tc) {
5440 		netdev_reset_tc(netdev);
5441 		return;
5442 	}
5443 
5444 	/* Set up actual enabled TCs on the VSI */
5445 	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5446 		return;
5447 
5448 	/* set per TC queues for the VSI */
5449 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled TCs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled, the
		 * enabled_tc bitmap would be 0x9 (binary 1001); the
		 * driver will set numtc for the netdev to 2, which the
		 * netdev layer will reference as TC 0 and TC 1.
		 */
5457 		if (vsi->tc_config.enabled_tc & BIT(i))
5458 			netdev_set_tc_queue(netdev,
5459 					vsi->tc_config.tc_info[i].netdev_tc,
5460 					vsi->tc_config.tc_info[i].qcount,
5461 					vsi->tc_config.tc_info[i].qoffset);
5462 	}
5463 
5464 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5465 		return;
5466 
5467 	/* Assign UP2TC map for the VSI */
5468 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5469 		/* Get the actual TC# for the UP */
5470 		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5471 		/* Get the mapped netdev TC# for the UP */
5472 		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
5473 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
5474 	}
5475 }
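
/* Illustrative sketch, not part of the driver: derives the compressed netdev
 * TC index for each enabled hardware TC, matching the example in the comment
 * above (TC0 and TC3 enabled map to netdev TCs 0 and 1).  The helper name is
 * hypothetical; disabled TCs are simply left at netdev TC 0 here.
 */
static inline void i40e_example_map_hw_tc_to_netdev_tc(u8 enabled_tc,
							u8 *netdev_tc)
{
	u8 next = 0;
	int i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		netdev_tc[i] = (enabled_tc & BIT(i)) ? next++ : 0;
}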
5476 
5477 /**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5479  * @vsi: the VSI being configured
5480  * @ctxt: the ctxt buffer returned from AQ VSI update param command
5481  **/
5482 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5483 				      struct i40e_vsi_context *ctxt)
5484 {
5485 	/* copy just the sections touched not the entire info
5486 	 * since not all sections are valid as returned by
5487 	 * update vsi params
5488 	 */
5489 	vsi->info.mapping_flags = ctxt->info.mapping_flags;
5490 	memcpy(&vsi->info.queue_mapping,
5491 	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5492 	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5493 	       sizeof(vsi->info.tc_mapping));
5494 }
5495 
5496 /**
5497  * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5498  * @vsi: the VSI being reconfigured
5499  * @vsi_offset: offset from main VF VSI
5500  */
5501 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5502 {
5503 	struct i40e_vsi_context ctxt = {};
5504 	struct i40e_pf *pf;
5505 	struct i40e_hw *hw;
5506 	int ret;
5507 
5508 	if (!vsi)
5509 		return I40E_ERR_PARAM;
5510 	pf = vsi->back;
5511 	hw = &pf->hw;
5512 
5513 	ctxt.seid = vsi->seid;
5514 	ctxt.pf_num = hw->pf_id;
5515 	ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5516 	ctxt.uplink_seid = vsi->uplink_seid;
5517 	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5518 	ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5519 	ctxt.info = vsi->info;
5520 
5521 	i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5522 				 false);
5523 	if (vsi->reconfig_rss) {
5524 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
5525 				      vsi->num_queue_pairs);
5526 		ret = i40e_vsi_config_rss(vsi);
5527 		if (ret) {
5528 			dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5529 			return ret;
5530 		}
5531 		vsi->reconfig_rss = false;
5532 	}
5533 
5534 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5535 	if (ret) {
5536 		dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
5537 			 i40e_stat_str(hw, ret),
5538 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5539 		return ret;
5540 	}
5541 	/* update the local VSI info with updated queue map */
5542 	i40e_vsi_update_queue_map(vsi, &ctxt);
5543 	vsi->info.valid_sections = 0;
5544 
5545 	return ret;
5546 }
5547 
5548 /**
5549  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5550  * @vsi: VSI to be configured
5551  * @enabled_tc: TC bitmap
5552  *
5553  * This configures a particular VSI for TCs that are mapped to the
5554  * given TC bitmap. It uses default bandwidth share for TCs across
5555  * VSIs to configure TC for a particular VSI.
5556  *
5557  * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
5559  * this function.
5560  **/
5561 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5562 {
5563 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5564 	struct i40e_pf *pf = vsi->back;
5565 	struct i40e_hw *hw = &pf->hw;
5566 	struct i40e_vsi_context ctxt;
5567 	int ret = 0;
5568 	int i;
5569 
5570 	/* Check if enabled_tc is same as existing or new TCs */
5571 	if (vsi->tc_config.enabled_tc == enabled_tc &&
5572 	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5573 		return ret;
5574 
5575 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
5576 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5577 		if (enabled_tc & BIT(i))
5578 			bw_share[i] = 1;
5579 	}
5580 
5581 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5582 	if (ret) {
5583 		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5584 
5585 		dev_info(&pf->pdev->dev,
5586 			 "Failed configuring TC map %d for VSI %d\n",
5587 			 enabled_tc, vsi->seid);
5588 		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5589 						  &bw_config, NULL);
5590 		if (ret) {
5591 			dev_info(&pf->pdev->dev,
5592 				 "Failed querying vsi bw info, err %s aq_err %s\n",
5593 				 i40e_stat_str(hw, ret),
5594 				 i40e_aq_str(hw, hw->aq.asq_last_status));
5595 			goto out;
5596 		}
5597 		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5598 			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5599 
5600 			if (!valid_tc)
5601 				valid_tc = bw_config.tc_valid_bits;
5602 			/* Always enable TC0, no matter what */
5603 			valid_tc |= 1;
5604 			dev_info(&pf->pdev->dev,
5605 				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5606 				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5607 			enabled_tc = valid_tc;
5608 		}
5609 
5610 		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5611 		if (ret) {
5612 			dev_err(&pf->pdev->dev,
				"Unable to configure TC map %d for VSI %d\n",
5614 				enabled_tc, vsi->seid);
5615 			goto out;
5616 		}
5617 	}
5618 
5619 	/* Update Queue Pairs Mapping for currently enabled UPs */
5620 	ctxt.seid = vsi->seid;
5621 	ctxt.pf_num = vsi->back->hw.pf_id;
5622 	ctxt.vf_num = 0;
5623 	ctxt.uplink_seid = vsi->uplink_seid;
5624 	ctxt.info = vsi->info;
5625 	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5626 		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5627 		if (ret)
5628 			goto out;
5629 	} else {
5630 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5631 	}
5632 
5633 	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5634 	 * queues changed.
5635 	 */
5636 	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5637 		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5638 				      vsi->num_queue_pairs);
5639 		ret = i40e_vsi_config_rss(vsi);
5640 		if (ret) {
5641 			dev_info(&vsi->back->pdev->dev,
5642 				 "Failed to reconfig rss for num_queues\n");
5643 			return ret;
5644 		}
5645 		vsi->reconfig_rss = false;
5646 	}
5647 	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5648 		ctxt.info.valid_sections |=
5649 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5650 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5651 	}
5652 
5653 	/* Update the VSI after updating the VSI queue-mapping
5654 	 * information
5655 	 */
5656 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5657 	if (ret) {
5658 		dev_info(&pf->pdev->dev,
5659 			 "Update vsi tc config failed, err %s aq_err %s\n",
5660 			 i40e_stat_str(hw, ret),
5661 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5662 		goto out;
5663 	}
5664 	/* update the local VSI info with updated queue map */
5665 	i40e_vsi_update_queue_map(vsi, &ctxt);
5666 	vsi->info.valid_sections = 0;
5667 
5668 	/* Update current VSI BW information */
5669 	ret = i40e_vsi_get_bw_info(vsi);
5670 	if (ret) {
5671 		dev_info(&pf->pdev->dev,
5672 			 "Failed updating vsi bw info, err %s aq_err %s\n",
5673 			 i40e_stat_str(hw, ret),
5674 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5675 		goto out;
5676 	}
5677 
5678 	/* Update the netdev TC setup */
5679 	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5680 out:
5681 	return ret;
5682 }
5683 
5684 /**
5685  * i40e_get_link_speed - Returns link speed for the interface
5686  * @vsi: VSI to be configured
5687  *
5688  **/
5689 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5690 {
5691 	struct i40e_pf *pf = vsi->back;
5692 
5693 	switch (pf->hw.phy.link_info.link_speed) {
5694 	case I40E_LINK_SPEED_40GB:
5695 		return 40000;
5696 	case I40E_LINK_SPEED_25GB:
5697 		return 25000;
5698 	case I40E_LINK_SPEED_20GB:
5699 		return 20000;
5700 	case I40E_LINK_SPEED_10GB:
5701 		return 10000;
5702 	case I40E_LINK_SPEED_1GB:
5703 		return 1000;
5704 	default:
5705 		return -EINVAL;
5706 	}
5707 }
5708 
5709 /**
5710  * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5711  * @vsi: VSI to be configured
5712  * @seid: seid of the channel/VSI
5713  * @max_tx_rate: max TX rate to be configured as BW limit
5714  *
5715  * Helper function to set BW limit for a given VSI
5716  **/
5717 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5718 {
5719 	struct i40e_pf *pf = vsi->back;
5720 	u64 credits = 0;
5721 	int speed = 0;
5722 	int ret = 0;
5723 
5724 	speed = i40e_get_link_speed(vsi);
5725 	if (max_tx_rate > speed) {
5726 		dev_err(&pf->pdev->dev,
5727 			"Invalid max tx rate %llu specified for VSI seid %d.",
5728 			max_tx_rate, seid);
5729 		return -EINVAL;
5730 	}
5731 	if (max_tx_rate && max_tx_rate < 50) {
5732 		dev_warn(&pf->pdev->dev,
5733 			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5734 		max_tx_rate = 50;
5735 	}
5736 
5737 	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
5738 	credits = max_tx_rate;
5739 	do_div(credits, I40E_BW_CREDIT_DIVISOR);
5740 	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5741 					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5742 	if (ret)
5743 		dev_err(&pf->pdev->dev,
5744 			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5745 			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5746 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5747 	return ret;
5748 }
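
/* Illustrative sketch, not part of the driver: shows the rate-to-credit
 * conversion used in i40e_set_bw_limit() above.  Credits are expressed in
 * 50 Mbps units (I40E_BW_CREDIT_DIVISOR) and do_div() divides the 64-bit
 * rate in place.  The helper name is hypothetical.
 */
static inline u64 i40e_example_rate_to_credits(u64 max_tx_rate_mbps)
{
	u64 credits = max_tx_rate_mbps;

	/* e.g. 300 Mbps -> 6 credits; 0 leaves the limit disabled */
	do_div(credits, I40E_BW_CREDIT_DIVISOR);

	return credits;
}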
5749 
5750 /**
5751  * i40e_remove_queue_channels - Remove queue channels for the TCs
5752  * @vsi: VSI to be configured
5753  *
5754  * Remove queue channels for the TCs
5755  **/
5756 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5757 {
5758 	enum i40e_admin_queue_err last_aq_status;
5759 	struct i40e_cloud_filter *cfilter;
5760 	struct i40e_channel *ch, *ch_tmp;
5761 	struct i40e_pf *pf = vsi->back;
5762 	struct hlist_node *node;
5763 	int ret, i;
5764 
5765 	/* Reset rss size that was stored when reconfiguring rss for
5766 	 * channel VSIs with non-power-of-2 queue count.
5767 	 */
5768 	vsi->current_rss_size = 0;
5769 
5770 	/* perform cleanup for channels if they exist */
5771 	if (list_empty(&vsi->ch_list))
5772 		return;
5773 
5774 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5775 		struct i40e_vsi *p_vsi;
5776 
5777 		list_del(&ch->list);
5778 		p_vsi = ch->parent_vsi;
5779 		if (!p_vsi || !ch->initialized) {
5780 			kfree(ch);
5781 			continue;
5782 		}
5783 		/* Reset queue contexts */
5784 		for (i = 0; i < ch->num_queue_pairs; i++) {
5785 			struct i40e_ring *tx_ring, *rx_ring;
5786 			u16 pf_q;
5787 
5788 			pf_q = ch->base_queue + i;
5789 			tx_ring = vsi->tx_rings[pf_q];
5790 			tx_ring->ch = NULL;
5791 
5792 			rx_ring = vsi->rx_rings[pf_q];
5793 			rx_ring->ch = NULL;
5794 		}
5795 
5796 		/* Reset BW configured for this VSI via mqprio */
5797 		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5798 		if (ret)
5799 			dev_info(&vsi->back->pdev->dev,
5800 				 "Failed to reset tx rate for ch->seid %u\n",
5801 				 ch->seid);
5802 
5803 		/* delete cloud filters associated with this channel */
5804 		hlist_for_each_entry_safe(cfilter, node,
5805 					  &pf->cloud_filter_list, cloud_node) {
5806 			if (cfilter->seid != ch->seid)
5807 				continue;
5808 
5809 			hash_del(&cfilter->cloud_node);
5810 			if (cfilter->dst_port)
5811 				ret = i40e_add_del_cloud_filter_big_buf(vsi,
5812 									cfilter,
5813 									false);
5814 			else
5815 				ret = i40e_add_del_cloud_filter(vsi, cfilter,
5816 								false);
5817 			last_aq_status = pf->hw.aq.asq_last_status;
5818 			if (ret)
5819 				dev_info(&pf->pdev->dev,
5820 					 "Failed to delete cloud filter, err %s aq_err %s\n",
5821 					 i40e_stat_str(&pf->hw, ret),
5822 					 i40e_aq_str(&pf->hw, last_aq_status));
5823 			kfree(cfilter);
5824 		}
5825 
5826 		/* delete VSI from FW */
5827 		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5828 					     NULL);
5829 		if (ret)
5830 			dev_err(&vsi->back->pdev->dev,
5831 				"unable to remove channel (%d) for parent VSI(%d)\n",
5832 				ch->seid, p_vsi->seid);
5833 		kfree(ch);
5834 	}
5835 	INIT_LIST_HEAD(&vsi->ch_list);
5836 }
5837 
5838 /**
5839  * i40e_get_max_queues_for_channel
 * @vsi: ptr to VSI with which channels are associated
5841  *
5842  * Helper function which returns max value among the queue counts set on the
5843  * channels/TCs created.
5844  **/
5845 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5846 {
5847 	struct i40e_channel *ch, *ch_tmp;
5848 	int max = 0;
5849 
5850 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5851 		if (!ch->initialized)
5852 			continue;
5853 		if (ch->num_queue_pairs > max)
5854 			max = ch->num_queue_pairs;
5855 	}
5856 
5857 	return max;
5858 }
5859 
5860 /**
5861  * i40e_validate_num_queues - validate num_queues w.r.t channel
5862  * @pf: ptr to PF device
5863  * @num_queues: number of queues
5864  * @vsi: the parent VSI
5865  * @reconfig_rss: indicates should the RSS be reconfigured or not
5866  *
 * This function validates the number of queues in the context of the new
 * channel being established and determines whether RSS needs to be
 * reconfigured for the parent VSI.
5870  **/
5871 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5872 				    struct i40e_vsi *vsi, bool *reconfig_rss)
5873 {
5874 	int max_ch_queues;
5875 
5876 	if (!reconfig_rss)
5877 		return -EINVAL;
5878 
5879 	*reconfig_rss = false;
5880 	if (vsi->current_rss_size) {
5881 		if (num_queues > vsi->current_rss_size) {
5882 			dev_dbg(&pf->pdev->dev,
5883 				"Error: num_queues (%d) > vsi's current_size(%d)\n",
5884 				num_queues, vsi->current_rss_size);
5885 			return -EINVAL;
5886 		} else if ((num_queues < vsi->current_rss_size) &&
5887 			   (!is_power_of_2(num_queues))) {
5888 			dev_dbg(&pf->pdev->dev,
5889 				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5890 				num_queues, vsi->current_rss_size);
5891 			return -EINVAL;
5892 		}
5893 	}
5894 
5895 	if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured across existing
		 * channels, if any.  If channels exist, enforce that
		 * 'num_queues' is at least the largest queue count
		 * configured on any channel.
		 */
5901 		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5902 		if (num_queues < max_ch_queues) {
5903 			dev_dbg(&pf->pdev->dev,
5904 				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
5905 				num_queues, max_ch_queues);
5906 			return -EINVAL;
5907 		}
5908 		*reconfig_rss = true;
5909 	}
5910 
5911 	return 0;
5912 }
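
/* Illustrative sketch, not part of the driver: condenses the rule enforced
 * by i40e_validate_num_queues() above - a non-power-of-2 queue count forces
 * an RSS reconfiguration, while a power-of-2 count can reuse the current
 * LUT.  The helper name is hypothetical.
 */
static inline bool i40e_example_needs_rss_reconfig(int num_queues)
{
	/* e.g. 4 queues -> false (reuse LUT), 6 queues -> true (reprogram) */
	return !is_power_of_2(num_queues);
}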
5913 
5914 /**
5915  * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5916  * @vsi: the VSI being setup
5917  * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5918  *
5919  * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5920  **/
5921 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5922 {
5923 	struct i40e_pf *pf = vsi->back;
5924 	u8 seed[I40E_HKEY_ARRAY_SIZE];
5925 	struct i40e_hw *hw = &pf->hw;
5926 	int local_rss_size;
5927 	u8 *lut;
5928 	int ret;
5929 
5930 	if (!vsi->rss_size)
5931 		return -EINVAL;
5932 
5933 	if (rss_size > vsi->rss_size)
5934 		return -EINVAL;
5935 
5936 	local_rss_size = min_t(int, vsi->rss_size, rss_size);
5937 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5938 	if (!lut)
5939 		return -ENOMEM;
5940 
5941 	/* Ignoring user configured lut if there is one */
5942 	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5943 
5944 	/* Use user configured hash key if there is one, otherwise
5945 	 * use default.
5946 	 */
5947 	if (vsi->rss_hkey_user)
5948 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5949 	else
5950 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5951 
5952 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5953 	if (ret) {
5954 		dev_info(&pf->pdev->dev,
5955 			 "Cannot set RSS lut, err %s aq_err %s\n",
5956 			 i40e_stat_str(hw, ret),
5957 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5958 		kfree(lut);
5959 		return ret;
5960 	}
5961 	kfree(lut);
5962 
5963 	/* Do the update w.r.t. storing rss_size */
5964 	if (!vsi->orig_rss_size)
5965 		vsi->orig_rss_size = vsi->rss_size;
5966 	vsi->current_rss_size = local_rss_size;
5967 
5968 	return ret;
5969 }
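
/* Illustrative sketch, not part of the driver: a minimal round-robin LUT
 * fill of the kind i40e_fill_rss_lut() is used for above, spreading hash
 * buckets across 'rss_size' queues.  The helper name is hypothetical and
 * rss_size is assumed to be non-zero.
 */
static inline void i40e_example_fill_lut(u8 *lut, u16 lut_size, u16 rss_size)
{
	u16 i;

	/* e.g. lut_size 512, rss_size 4 -> 0, 1, 2, 3, 0, 1, 2, 3, ... */
	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;
}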
5970 
5971 /**
5972  * i40e_channel_setup_queue_map - Setup a channel queue map
5973  * @pf: ptr to PF device
5974  * @ctxt: VSI context structure
5975  * @ch: ptr to channel structure
5976  *
5977  * Setup queue map for a specific channel
5978  **/
5979 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5980 					 struct i40e_vsi_context *ctxt,
5981 					 struct i40e_channel *ch)
5982 {
5983 	u16 qcount, qmap, sections = 0;
5984 	u8 offset = 0;
5985 	int pow;
5986 
5987 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5988 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5989 
5990 	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5991 	ch->num_queue_pairs = qcount;
5992 
5993 	/* find the next higher power-of-2 of num queue pairs */
5994 	pow = ilog2(qcount);
5995 	if (!is_power_of_2(qcount))
5996 		pow++;
5997 
5998 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5999 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
6000 
6001 	/* Setup queue TC[0].qmap for given VSI context */
6002 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
6003 
6004 	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
6005 	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6006 	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6007 	ctxt->info.valid_sections |= cpu_to_le16(sections);
6008 }
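
/* Illustrative sketch, not part of the driver: shows how the TC queue map
 * word built in i40e_channel_setup_queue_map() above is composed - a queue
 * offset plus the exponent of the next-higher power of two of the queue
 * count.  The helper name is hypothetical and qcount is assumed non-zero.
 */
static inline u16 i40e_example_build_tc_qmap(u8 offset, u16 qcount)
{
	int pow = ilog2(qcount);

	/* e.g. qcount 6 -> pow 3 (8 addressable queues), qcount 4 -> pow 2 */
	if (!is_power_of_2(qcount))
		pow++;

	return (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
}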
6009 
6010 /**
6011  * i40e_add_channel - add a channel by adding VSI
6012  * @pf: ptr to PF device
6013  * @uplink_seid: underlying HW switching element (VEB) ID
6014  * @ch: ptr to channel structure
6015  *
6016  * Add a channel (VSI) using add_vsi and queue_map
6017  **/
6018 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6019 			    struct i40e_channel *ch)
6020 {
6021 	struct i40e_hw *hw = &pf->hw;
6022 	struct i40e_vsi_context ctxt;
6023 	u8 enabled_tc = 0x1; /* TC0 enabled */
6024 	int ret;
6025 
6026 	if (ch->type != I40E_VSI_VMDQ2) {
6027 		dev_info(&pf->pdev->dev,
6028 			 "add new vsi failed, ch->type %d\n", ch->type);
6029 		return -EINVAL;
6030 	}
6031 
6032 	memset(&ctxt, 0, sizeof(ctxt));
6033 	ctxt.pf_num = hw->pf_id;
6034 	ctxt.vf_num = 0;
6035 	ctxt.uplink_seid = uplink_seid;
6036 	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6037 	if (ch->type == I40E_VSI_VMDQ2)
6038 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6039 
6040 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
6041 		ctxt.info.valid_sections |=
6042 		     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6043 		ctxt.info.switch_id =
6044 		   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6045 	}
6046 
6047 	/* Set queue map for a given VSI context */
6048 	i40e_channel_setup_queue_map(pf, &ctxt, ch);
6049 
6050 	/* Now time to create VSI */
6051 	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6052 	if (ret) {
6053 		dev_info(&pf->pdev->dev,
6054 			 "add new vsi failed, err %s aq_err %s\n",
6055 			 i40e_stat_str(&pf->hw, ret),
6056 			 i40e_aq_str(&pf->hw,
6057 				     pf->hw.aq.asq_last_status));
6058 		return -ENOENT;
6059 	}
6060 
6061 	/* Success, update channel, set enabled_tc only if the channel
6062 	 * is not a macvlan
6063 	 */
6064 	ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6065 	ch->seid = ctxt.seid;
6066 	ch->vsi_number = ctxt.vsi_number;
6067 	ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
6068 
6069 	/* copy just the sections touched not the entire info
6070 	 * since not all sections are valid as returned by
6071 	 * update vsi params
6072 	 */
6073 	ch->info.mapping_flags = ctxt.info.mapping_flags;
6074 	memcpy(&ch->info.queue_mapping,
6075 	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6076 	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6077 	       sizeof(ctxt.info.tc_mapping));
6078 
6079 	return 0;
6080 }
6081 
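/**
 * i40e_channel_config_bw - configure BW for the channel
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @bw_share: Bandwidth share per enabled TC
 *
 * Configure the BW allocation per enabled TC for the channel (VSI).
 **/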
6082 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6083 				  u8 *bw_share)
6084 {
6085 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6086 	i40e_status ret;
6087 	int i;
6088 
6089 	memset(&bw_data, 0, sizeof(bw_data));
6090 	bw_data.tc_valid_bits = ch->enabled_tc;
6091 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6092 		bw_data.tc_bw_credits[i] = bw_share[i];
6093 
6094 	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6095 				       &bw_data, NULL);
6096 	if (ret) {
6097 		dev_info(&vsi->back->pdev->dev,
6098 			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6099 			 vsi->back->hw.aq.asq_last_status, ch->seid);
6100 		return -EINVAL;
6101 	}
6102 
6103 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6104 		ch->info.qs_handle[i] = bw_data.qs_handles[i];
6105 
6106 	return 0;
6107 }
6108 
6109 /**
6110  * i40e_channel_config_tx_ring - config TX ring associated with new channel
6111  * @pf: ptr to PF device
6112  * @vsi: the VSI being setup
6113  * @ch: ptr to channel structure
6114  *
 * Configure the Tx rings associated with the channel (VSI), since its
 * queues are taken from the parent VSI.
6117  **/
6118 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6119 				       struct i40e_vsi *vsi,
6120 				       struct i40e_channel *ch)
6121 {
6122 	i40e_status ret;
6123 	int i;
6124 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6125 
6126 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
6127 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6128 		if (ch->enabled_tc & BIT(i))
6129 			bw_share[i] = 1;
6130 	}
6131 
6132 	/* configure BW for new VSI */
6133 	ret = i40e_channel_config_bw(vsi, ch, bw_share);
6134 	if (ret) {
6135 		dev_info(&vsi->back->pdev->dev,
6136 			 "Failed configuring TC map %d for channel (seid %u)\n",
6137 			 ch->enabled_tc, ch->seid);
6138 		return ret;
6139 	}
6140 
6141 	for (i = 0; i < ch->num_queue_pairs; i++) {
6142 		struct i40e_ring *tx_ring, *rx_ring;
6143 		u16 pf_q;
6144 
6145 		pf_q = ch->base_queue + i;
6146 
6147 		/* Get to TX ring ptr of main VSI, for re-setup TX queue
6148 		 * context
6149 		 */
6150 		tx_ring = vsi->tx_rings[pf_q];
6151 		tx_ring->ch = ch;
6152 
6153 		/* Get the RX ring ptr */
6154 		rx_ring = vsi->rx_rings[pf_q];
6155 		rx_ring->ch = ch;
6156 	}
6157 
6158 	return 0;
6159 }
6160 
6161 /**
6162  * i40e_setup_hw_channel - setup new channel
6163  * @pf: ptr to PF device
6164  * @vsi: the VSI being setup
6165  * @ch: ptr to channel structure
6166  * @uplink_seid: underlying HW switching element (VEB) ID
6167  * @type: type of channel to be created (VMDq2/VF)
6168  *
6169  * Setup new channel (VSI) based on specified type (VMDq2/VF)
6170  * and configures TX rings accordingly
6171  **/
6172 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6173 					struct i40e_vsi *vsi,
6174 					struct i40e_channel *ch,
6175 					u16 uplink_seid, u8 type)
6176 {
6177 	int ret;
6178 
6179 	ch->initialized = false;
6180 	ch->base_queue = vsi->next_base_queue;
6181 	ch->type = type;
6182 
6183 	/* Proceed with creation of channel (VMDq2) VSI */
6184 	ret = i40e_add_channel(pf, uplink_seid, ch);
6185 	if (ret) {
6186 		dev_info(&pf->pdev->dev,
6187 			 "failed to add_channel using uplink_seid %u\n",
6188 			 uplink_seid);
6189 		return ret;
6190 	}
6191 
6192 	/* Mark the successful creation of channel */
6193 	ch->initialized = true;
6194 
6195 	/* Reconfigure TX queues using QTX_CTL register */
6196 	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6197 	if (ret) {
6198 		dev_info(&pf->pdev->dev,
6199 			 "failed to configure TX rings for channel %u\n",
6200 			 ch->seid);
6201 		return ret;
6202 	}
6203 
6204 	/* update 'next_base_queue' */
6205 	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6206 	dev_dbg(&pf->pdev->dev,
6207 		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6208 		ch->seid, ch->vsi_number, ch->stat_counter_idx,
6209 		ch->num_queue_pairs,
6210 		vsi->next_base_queue);
6211 	return ret;
6212 }
6213 
6214 /**
6215  * i40e_setup_channel - setup new channel using uplink element
6216  * @pf: ptr to PF device
6217  * @vsi: pointer to the VSI to set up the channel within
6218  * @ch: ptr to channel structure
6219  *
6220  * Setup new channel (VSI) based on specified type (VMDq2/VF)
6221  * and uplink switching element (uplink_seid)
6222  **/
6223 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6224 			       struct i40e_channel *ch)
6225 {
6226 	u8 vsi_type;
6227 	u16 seid;
6228 	int ret;
6229 
6230 	if (vsi->type == I40E_VSI_MAIN) {
6231 		vsi_type = I40E_VSI_VMDQ2;
6232 	} else {
6233 		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6234 			vsi->type);
6235 		return false;
6236 	}
6237 
6238 	/* underlying switching element */
6239 	seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6240 
6241 	/* create channel (VSI), configure TX rings */
6242 	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6243 	if (ret) {
6244 		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6245 		return false;
6246 	}
6247 
	return ch->initialized;
6249 }
6250 
6251 /**
6252  * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6253  * @vsi: ptr to VSI which has PF backing
6254  *
 * Sets up the switch mode correctly if it needs to be changed,
 * restricting it to the allowed modes.
6257  **/
6258 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6259 {
6260 	u8 mode;
6261 	struct i40e_pf *pf = vsi->back;
6262 	struct i40e_hw *hw = &pf->hw;
6263 	int ret;
6264 
6265 	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6266 	if (ret)
6267 		return -EINVAL;
6268 
6269 	if (hw->dev_caps.switch_mode) {
6270 		/* if switch mode is set, support mode2 (non-tunneled for
6271 		 * cloud filter) for now
6272 		 */
6273 		u32 switch_mode = hw->dev_caps.switch_mode &
6274 				  I40E_SWITCH_MODE_MASK;
6275 		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6276 			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6277 				return 0;
6278 			dev_err(&pf->pdev->dev,
6279 				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6280 				hw->dev_caps.switch_mode);
6281 			return -EINVAL;
6282 		}
6283 	}
6284 
6285 	/* Set Bit 7 to be valid */
6286 	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6287 
6288 	/* Set L4type for TCP support */
6289 	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6290 
6291 	/* Set cloud filter mode */
6292 	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6293 
6294 	/* Prep mode field for set_switch_config */
6295 	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6296 					pf->last_sw_conf_valid_flags,
6297 					mode, NULL);
6298 	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6299 		dev_err(&pf->pdev->dev,
6300 			"couldn't set switch config bits, err %s aq_err %s\n",
6301 			i40e_stat_str(hw, ret),
6302 			i40e_aq_str(hw,
6303 				    hw->aq.asq_last_status));
6304 
6305 	return ret;
6306 }
6307 
6308 /**
6309  * i40e_create_queue_channel - function to create channel
6310  * @vsi: VSI to be configured
6311  * @ch: ptr to channel (it contains channel specific params)
6312  *
6313  * This function creates channel (VSI) using num_queues specified by user,
6314  * reconfigs RSS if needed.
6315  **/
6316 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6317 			      struct i40e_channel *ch)
6318 {
6319 	struct i40e_pf *pf = vsi->back;
6320 	bool reconfig_rss;
6321 	int err;
6322 
6323 	if (!ch)
6324 		return -EINVAL;
6325 
6326 	if (!ch->num_queue_pairs) {
6327 		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6328 			ch->num_queue_pairs);
6329 		return -EINVAL;
6330 	}
6331 
6332 	/* validate user requested num_queues for channel */
6333 	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6334 				       &reconfig_rss);
6335 	if (err) {
6336 		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6337 			 ch->num_queue_pairs);
6338 		return -EINVAL;
6339 	}
6340 
	/* By default we are in VEPA mode; if this is the first VF/VMDq
	 * VSI to be added, switch to VEB mode.
	 */
6344 
6345 	if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6346 		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6347 
6348 		if (vsi->type == I40E_VSI_MAIN) {
6349 			if (pf->flags & I40E_FLAG_TC_MQPRIO)
6350 				i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6351 			else
6352 				i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
6353 		}
		/* From now on, the main VSI's number of queues will be
		 * TC0's queue count.
		 */
6357 	}
6358 
	/* By this time, vsi->cnt_q_avail should be non-zero and at
	 * least num_queues.
	 */
6362 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6363 		dev_dbg(&pf->pdev->dev,
6364 			"Error: cnt_q_avail (%u) less than num_queues %d\n",
6365 			vsi->cnt_q_avail, ch->num_queue_pairs);
6366 		return -EINVAL;
6367 	}
6368 
6369 	/* reconfig_rss only if vsi type is MAIN_VSI */
6370 	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6371 		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6372 		if (err) {
6373 			dev_info(&pf->pdev->dev,
6374 				 "Error: unable to reconfig rss for num_queues (%u)\n",
6375 				 ch->num_queue_pairs);
6376 			return -EINVAL;
6377 		}
6378 	}
6379 
6380 	if (!i40e_setup_channel(pf, vsi, ch)) {
6381 		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6382 		return -EINVAL;
6383 	}
6384 
6385 	dev_info(&pf->pdev->dev,
6386 		 "Setup channel (id:%u) utilizing num_queues %d\n",
6387 		 ch->seid, ch->num_queue_pairs);
6388 
6389 	/* configure VSI for BW limit */
6390 	if (ch->max_tx_rate) {
6391 		u64 credits = ch->max_tx_rate;
6392 
6393 		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6394 			return -EINVAL;
6395 
6396 		do_div(credits, I40E_BW_CREDIT_DIVISOR);
6397 		dev_dbg(&pf->pdev->dev,
6398 			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6399 			ch->max_tx_rate,
6400 			credits,
6401 			ch->seid);
6402 	}
6403 
6404 	/* in case of VF, this will be main SRIOV VSI */
6405 	ch->parent_vsi = vsi;
6406 
6407 	/* and update main_vsi's count for queue_available to use */
6408 	vsi->cnt_q_avail -= ch->num_queue_pairs;
6409 
6410 	return 0;
6411 }
6412 
6413 /**
6414  * i40e_configure_queue_channels - Add queue channel for the given TCs
6415  * @vsi: VSI to be configured
6416  *
6417  * Configures queue channel mapping to the given TCs
6418  **/
6419 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6420 {
6421 	struct i40e_channel *ch;
6422 	u64 max_rate = 0;
6423 	int ret = 0, i;
6424 
6425 	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6426 	vsi->tc_seid_map[0] = vsi->seid;
6427 	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6428 		if (vsi->tc_config.enabled_tc & BIT(i)) {
6429 			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6430 			if (!ch) {
6431 				ret = -ENOMEM;
6432 				goto err_free;
6433 			}
6434 
6435 			INIT_LIST_HEAD(&ch->list);
6436 			ch->num_queue_pairs =
6437 				vsi->tc_config.tc_info[i].qcount;
6438 			ch->base_queue =
6439 				vsi->tc_config.tc_info[i].qoffset;
6440 
6441 			/* Bandwidth limit through tc interface is in bytes/s,
6442 			 * change to Mbit/s
6443 			 */
6444 			max_rate = vsi->mqprio_qopt.max_rate[i];
6445 			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6446 			ch->max_tx_rate = max_rate;
6447 
6448 			list_add_tail(&ch->list, &vsi->ch_list);
6449 
6450 			ret = i40e_create_queue_channel(vsi, ch);
6451 			if (ret) {
6452 				dev_err(&vsi->back->pdev->dev,
6453 					"Failed creating queue channel with TC%d: queues %d\n",
6454 					i, ch->num_queue_pairs);
6455 				goto err_free;
6456 			}
6457 			vsi->tc_seid_map[i] = ch->seid;
6458 		}
6459 	}
6460 	return ret;
6461 
6462 err_free:
6463 	i40e_remove_queue_channels(vsi);
6464 	return ret;
6465 }
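
/* Illustrative sketch, not part of the driver: the tc interface hands
 * per-TC rates over in bytes per second, and the loop above converts them
 * to Mbit/s by dividing with I40E_BW_MBPS_DIVISOR.  The helper name is
 * hypothetical.
 */
static inline u64 i40e_example_bytes_per_sec_to_mbps(u64 rate_bytes_per_sec)
{
	u64 rate_mbps = rate_bytes_per_sec;

	/* e.g. 12500000 bytes/s -> 100 Mbit/s */
	do_div(rate_mbps, I40E_BW_MBPS_DIVISOR);

	return rate_mbps;
}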
6466 
6467 /**
6468  * i40e_veb_config_tc - Configure TCs for given VEB
6469  * @veb: given VEB
6470  * @enabled_tc: TC bitmap
6471  *
6472  * Configures given TC bitmap for VEB (switching) element
6473  **/
6474 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6475 {
6476 	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6477 	struct i40e_pf *pf = veb->pf;
6478 	int ret = 0;
6479 	int i;
6480 
6481 	/* No TCs or already enabled TCs just return */
6482 	if (!enabled_tc || veb->enabled_tc == enabled_tc)
6483 		return ret;
6484 
6485 	bw_data.tc_valid_bits = enabled_tc;
6486 	/* bw_data.absolute_credits is not set (relative) */
6487 
6488 	/* Enable ETS TCs with equal BW Share for now */
6489 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6490 		if (enabled_tc & BIT(i))
6491 			bw_data.tc_bw_share_credits[i] = 1;
6492 	}
6493 
6494 	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6495 						   &bw_data, NULL);
6496 	if (ret) {
6497 		dev_info(&pf->pdev->dev,
6498 			 "VEB bw config failed, err %s aq_err %s\n",
6499 			 i40e_stat_str(&pf->hw, ret),
6500 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6501 		goto out;
6502 	}
6503 
6504 	/* Update the BW information */
6505 	ret = i40e_veb_get_bw_info(veb);
6506 	if (ret) {
6507 		dev_info(&pf->pdev->dev,
6508 			 "Failed getting veb bw config, err %s aq_err %s\n",
6509 			 i40e_stat_str(&pf->hw, ret),
6510 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6511 	}
6512 
6513 out:
6514 	return ret;
6515 }
6516 
6517 #ifdef CONFIG_I40E_DCB
6518 /**
6519  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6520  * @pf: PF struct
6521  *
6522  * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling
6524  * this function
6525  **/
6526 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6527 {
6528 	u8 tc_map = 0;
6529 	int ret;
6530 	u8 v;
6531 
6532 	/* Enable the TCs available on PF to all VEBs */
6533 	tc_map = i40e_pf_get_tc_map(pf);
6534 	if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6535 		return;
6536 
6537 	for (v = 0; v < I40E_MAX_VEB; v++) {
6538 		if (!pf->veb[v])
6539 			continue;
6540 		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6541 		if (ret) {
6542 			dev_info(&pf->pdev->dev,
6543 				 "Failed configuring TC for VEB seid=%d\n",
6544 				 pf->veb[v]->seid);
			/* Will try to configure as many components as possible */
6546 		}
6547 	}
6548 
6549 	/* Update each VSI */
6550 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6551 		if (!pf->vsi[v])
6552 			continue;
6553 
6554 		/* - Enable all TCs for the LAN VSI
6555 		 * - For all others keep them at TC0 for now
6556 		 */
6557 		if (v == pf->lan_vsi)
6558 			tc_map = i40e_pf_get_tc_map(pf);
6559 		else
6560 			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6561 
6562 		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6563 		if (ret) {
6564 			dev_info(&pf->pdev->dev,
6565 				 "Failed configuring TC for VSI seid=%d\n",
6566 				 pf->vsi[v]->seid);
			/* Will try to configure as many components as possible */
6568 		} else {
6569 			/* Re-configure VSI vectors based on updated TC map */
6570 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6571 			if (pf->vsi[v]->netdev)
6572 				i40e_dcbnl_set_all(pf->vsi[v]);
6573 		}
6574 	}
6575 }
6576 
6577 /**
6578  * i40e_resume_port_tx - Resume port Tx
6579  * @pf: PF struct
6580  *
6581  * Resume a port's Tx and issue a PF reset in case of failure to
6582  * resume.
6583  **/
6584 static int i40e_resume_port_tx(struct i40e_pf *pf)
6585 {
6586 	struct i40e_hw *hw = &pf->hw;
6587 	int ret;
6588 
6589 	ret = i40e_aq_resume_port_tx(hw, NULL);
6590 	if (ret) {
6591 		dev_info(&pf->pdev->dev,
6592 			 "Resume Port Tx failed, err %s aq_err %s\n",
6593 			  i40e_stat_str(&pf->hw, ret),
6594 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6595 		/* Schedule PF reset to recover */
6596 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6597 		i40e_service_event_schedule(pf);
6598 	}
6599 
6600 	return ret;
6601 }
6602 
6603 /**
6604  * i40e_suspend_port_tx - Suspend port Tx
6605  * @pf: PF struct
6606  *
6607  * Suspend a port's Tx and issue a PF reset in case of failure.
6608  **/
6609 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6610 {
6611 	struct i40e_hw *hw = &pf->hw;
6612 	int ret;
6613 
6614 	ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6615 	if (ret) {
6616 		dev_info(&pf->pdev->dev,
6617 			 "Suspend Port Tx failed, err %s aq_err %s\n",
6618 			 i40e_stat_str(&pf->hw, ret),
6619 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6620 		/* Schedule PF reset to recover */
6621 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6622 		i40e_service_event_schedule(pf);
6623 	}
6624 
6625 	return ret;
6626 }
6627 
6628 /**
6629  * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6630  * @pf: PF being configured
6631  * @new_cfg: New DCBX configuration
6632  *
6633  * Program DCB settings into HW and reconfigure VEB/VSIs on
6634  * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6635  **/
6636 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6637 				  struct i40e_dcbx_config *new_cfg)
6638 {
6639 	struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6640 	int ret;
6641 
	/* Check if reconfiguration is needed; compare the configurations
	 * themselves, not the local pointers.
	 */
	if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6644 		dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6645 		return 0;
6646 	}
6647 
6648 	/* Config change disable all VSIs */
6649 	i40e_pf_quiesce_all_vsi(pf);
6650 
6651 	/* Copy the new config to the current config */
6652 	*old_cfg = *new_cfg;
6653 	old_cfg->etsrec = old_cfg->etscfg;
6654 	ret = i40e_set_dcb_config(&pf->hw);
6655 	if (ret) {
6656 		dev_info(&pf->pdev->dev,
6657 			 "Set DCB Config failed, err %s aq_err %s\n",
6658 			 i40e_stat_str(&pf->hw, ret),
6659 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6660 		goto out;
6661 	}
6662 
6663 	/* Changes in configuration update VEB/VSI */
6664 	i40e_dcb_reconfigure(pf);
6665 out:
6666 	/* In case of reset do not try to resume anything */
6667 	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6668 		/* Re-start the VSIs if disabled */
6669 		ret = i40e_resume_port_tx(pf);
6670 		/* In case of error no point in resuming VSIs */
6671 		if (ret)
6672 			goto err;
6673 		i40e_pf_unquiesce_all_vsi(pf);
6674 	}
6675 err:
6676 	return ret;
6677 }
6678 
6679 /**
6680  * i40e_hw_dcb_config - Program new DCBX settings into HW
6681  * @pf: PF being configured
6682  * @new_cfg: New DCBX configuration
6683  *
6684  * Program DCB settings into HW and reconfigure VEB/VSIs on
6685  * given PF
6686  **/
6687 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6688 {
6689 	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6690 	u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6691 	u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6692 	struct i40e_dcbx_config *old_cfg;
6693 	u8 mode[I40E_MAX_TRAFFIC_CLASS];
6694 	struct i40e_rx_pb_config pb_cfg;
6695 	struct i40e_hw *hw = &pf->hw;
6696 	u8 num_ports = hw->num_ports;
6697 	bool need_reconfig;
6698 	int ret = -EINVAL;
6699 	u8 lltc_map = 0;
6700 	u8 tc_map = 0;
6701 	u8 new_numtc;
6702 	u8 i;
6703 
6704 	dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6705 	/* Un-pack information to Program ETS HW via shared API
6706 	 * numtc, tcmap
6707 	 * LLTC map
6708 	 * ETS/NON-ETS arbiter mode
6709 	 * max exponent (credit refills)
6710 	 * Total number of ports
6711 	 * PFC priority bit-map
6712 	 * Priority Table
6713 	 * BW % per TC
6714 	 * Arbiter mode between UPs sharing same TC
6715 	 * TSA table (ETS or non-ETS)
6716 	 * EEE enabled or not
6717 	 * MFS TC table
6718 	 */
6719 
6720 	new_numtc = i40e_dcb_get_num_tc(new_cfg);
6721 
6722 	memset(&ets_data, 0, sizeof(ets_data));
6723 	for (i = 0; i < new_numtc; i++) {
6724 		tc_map |= BIT(i);
6725 		switch (new_cfg->etscfg.tsatable[i]) {
6726 		case I40E_IEEE_TSA_ETS:
6727 			prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6728 			ets_data.tc_bw_share_credits[i] =
6729 					new_cfg->etscfg.tcbwtable[i];
6730 			break;
6731 		case I40E_IEEE_TSA_STRICT:
6732 			prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6733 			lltc_map |= BIT(i);
6734 			ets_data.tc_bw_share_credits[i] =
6735 					I40E_DCB_STRICT_PRIO_CREDITS;
6736 			break;
6737 		default:
6738 			/* Invalid TSA type */
6739 			need_reconfig = false;
6740 			goto out;
6741 		}
6742 	}
6743 
6744 	old_cfg = &hw->local_dcbx_config;
	/* Check if reconfiguration is needed */
6746 	need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6747 
6748 	/* If needed, enable/disable frame tagging, disable all VSIs
6749 	 * and suspend port tx
6750 	 */
6751 	if (need_reconfig) {
6752 		/* Enable DCB tagging only when more than one TC */
6753 		if (new_numtc > 1)
6754 			pf->flags |= I40E_FLAG_DCB_ENABLED;
6755 		else
6756 			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6757 
6758 		set_bit(__I40E_PORT_SUSPENDED, pf->state);
		/* Reconfiguration needed, quiesce all VSIs */
6760 		i40e_pf_quiesce_all_vsi(pf);
6761 		ret = i40e_suspend_port_tx(pf);
6762 		if (ret)
6763 			goto err;
6764 	}
6765 
6766 	/* Configure Port ETS Tx Scheduler */
6767 	ets_data.tc_valid_bits = tc_map;
6768 	ets_data.tc_strict_priority_flags = lltc_map;
6769 	ret = i40e_aq_config_switch_comp_ets
6770 		(hw, pf->mac_seid, &ets_data,
6771 		 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6772 	if (ret) {
6773 		dev_info(&pf->pdev->dev,
6774 			 "Modify Port ETS failed, err %s aq_err %s\n",
6775 			 i40e_stat_str(&pf->hw, ret),
6776 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6777 		goto out;
6778 	}
6779 
6780 	/* Configure Rx ETS HW */
6781 	memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
6782 	i40e_dcb_hw_set_num_tc(hw, new_numtc);
6783 	i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
6784 				   I40E_DCB_ARB_MODE_STRICT_PRIORITY,
6785 				   I40E_DCB_DEFAULT_MAX_EXPONENT,
6786 				   lltc_map);
6787 	i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
6788 	i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
6789 				     prio_type);
6790 	i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
6791 			       new_cfg->etscfg.prioritytable);
6792 	i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
6793 
6794 	/* Configure Rx Packet Buffers in HW */
6795 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6796 		mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
6797 		mfs_tc[i] += I40E_PACKET_HDR_PAD;
6798 	}
6799 
6800 	i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
6801 					 false, new_cfg->pfc.pfcenable,
6802 					 mfs_tc, &pb_cfg);
6803 	i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
6804 
6805 	/* Update the local Rx Packet buffer config */
6806 	pf->pb_cfg = pb_cfg;
6807 
6808 	/* Inform the FW about changes to DCB configuration */
6809 	ret = i40e_aq_dcb_updated(&pf->hw, NULL);
6810 	if (ret) {
6811 		dev_info(&pf->pdev->dev,
6812 			 "DCB Updated failed, err %s aq_err %s\n",
6813 			 i40e_stat_str(&pf->hw, ret),
6814 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6815 		goto out;
6816 	}
6817 
6818 	/* Update the port DCBx configuration */
6819 	*old_cfg = *new_cfg;
6820 
6821 	/* Changes in configuration update VEB/VSI */
6822 	i40e_dcb_reconfigure(pf);
6823 out:
6824 	/* Re-start the VSIs if disabled */
6825 	if (need_reconfig) {
6826 		ret = i40e_resume_port_tx(pf);
6827 
6828 		clear_bit(__I40E_PORT_SUSPENDED, pf->state);
6829 		/* In case of error no point in resuming VSIs */
6830 		if (ret)
6831 			goto err;
6832 
6833 		/* Wait for the PF's queues to be disabled */
6834 		ret = i40e_pf_wait_queues_disabled(pf);
6835 		if (ret) {
6836 			/* Schedule PF reset to recover */
6837 			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6838 			i40e_service_event_schedule(pf);
6839 			goto err;
6840 		} else {
6841 			i40e_pf_unquiesce_all_vsi(pf);
6842 			set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6843 			set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
6844 		}
		/* registers are set, let's apply */
6846 		if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
6847 			ret = i40e_hw_set_dcb_config(pf, new_cfg);
6848 	}
6849 
6850 err:
6851 	return ret;
6852 }
6853 
6854 /**
6855  * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
6856  * @pf: PF being queried
6857  *
6858  * Set default DCB configuration in case DCB is to be done in SW.
6859  **/
6860 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
6861 {
6862 	struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
6863 	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6864 	struct i40e_hw *hw = &pf->hw;
6865 	int err;
6866 
6867 	if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
6868 		/* Update the local cached instance with TC0 ETS */
6869 		memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
6870 		pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6871 		pf->tmp_cfg.etscfg.maxtcs = 0;
6872 		pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6873 		pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
6874 		pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
6875 		pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
6876 		/* FW needs one App to configure HW */
6877 		pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
6878 		pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
6879 		pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
6880 		pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
6881 
6882 		return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
6883 	}
6884 
6885 	memset(&ets_data, 0, sizeof(ets_data));
6886 	ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
6887 	ets_data.tc_strict_priority_flags = 0; /* ETS */
6888 	ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
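	/* With only TC0 valid, no strict-priority flags and 100% of the
	 * bandwidth credits on TC0, this is the simplest possible ETS
	 * setup: a single traffic class scheduled with ETS.
	 */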
6889 
6890 	/* Enable ETS on the Physical port */
6891 	err = i40e_aq_config_switch_comp_ets
6892 		(hw, pf->mac_seid, &ets_data,
6893 		 i40e_aqc_opc_enable_switching_comp_ets, NULL);
6894 	if (err) {
6895 		dev_info(&pf->pdev->dev,
6896 			 "Enable Port ETS failed, err %s aq_err %s\n",
6897 			 i40e_stat_str(&pf->hw, err),
6898 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6899 		err = -ENOENT;
6900 		goto out;
6901 	}
6902 
6903 	/* Update the local cached instance with TC0 ETS */
6904 	dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6905 	dcb_cfg->etscfg.cbs = 0;
6906 	dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
6907 	dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6908 
6909 out:
6910 	return err;
6911 }
6912 
6913 /**
6914  * i40e_init_pf_dcb - Initialize DCB configuration
6915  * @pf: PF being configured
6916  *
6917  * Query the current DCB configuration and cache it
6918  * in the hardware structure
6919  **/
6920 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6921 {
6922 	struct i40e_hw *hw = &pf->hw;
6923 	int err;
6924 
	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
	 * Also do not enable DCBx if the FW LLDP agent is disabled.
	 */
6928 	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
6929 		dev_info(&pf->pdev->dev, "DCB is not supported.\n");
6930 		err = I40E_NOT_SUPPORTED;
6931 		goto out;
6932 	}
6933 	if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
6934 		dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
6935 		err = i40e_dcb_sw_default_config(pf);
6936 		if (err) {
6937 			dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
6938 			goto out;
6939 		}
6940 		dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
6941 		pf->dcbx_cap = DCB_CAP_DCBX_HOST |
6942 			       DCB_CAP_DCBX_VER_IEEE;
6943 		/* at init capable but disabled */
6944 		pf->flags |= I40E_FLAG_DCB_CAPABLE;
6945 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6946 		goto out;
6947 	}
6948 	err = i40e_init_dcb(hw, true);
6949 	if (!err) {
6950 		/* Device/Function is not DCBX capable */
6951 		if ((!hw->func_caps.dcb) ||
6952 		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6953 			dev_info(&pf->pdev->dev,
6954 				 "DCBX offload is not supported or is disabled for this PF.\n");
6955 		} else {
			/* When status is not DISABLED then DCBX is in FW */
6957 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6958 				       DCB_CAP_DCBX_VER_IEEE;
6959 
6960 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
6961 			/* Enable DCB tagging only when more than one TC
6962 			 * or explicitly disable if only one TC
6963 			 */
6964 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6965 				pf->flags |= I40E_FLAG_DCB_ENABLED;
6966 			else
6967 				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6968 			dev_dbg(&pf->pdev->dev,
6969 				"DCBX offload is supported for this PF.\n");
6970 		}
6971 	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6972 		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6973 		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6974 	} else {
6975 		dev_info(&pf->pdev->dev,
6976 			 "Query for DCB configuration failed, err %s aq_err %s\n",
6977 			 i40e_stat_str(&pf->hw, err),
6978 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6979 	}
6980 
6981 out:
6982 	return err;
6983 }
6984 #endif /* CONFIG_I40E_DCB */
6985 
6986 /**
6987  * i40e_print_link_message - print link up or down
6988  * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
6990  */
6991 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6992 {
6993 	enum i40e_aq_link_speed new_speed;
6994 	struct i40e_pf *pf = vsi->back;
6995 	char *speed = "Unknown";
6996 	char *fc = "Unknown";
6997 	char *fec = "";
6998 	char *req_fec = "";
6999 	char *an = "";
7000 
7001 	if (isup)
7002 		new_speed = pf->hw.phy.link_info.link_speed;
7003 	else
7004 		new_speed = I40E_LINK_SPEED_UNKNOWN;
7005 
7006 	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7007 		return;
7008 	vsi->current_isup = isup;
7009 	vsi->current_speed = new_speed;
7010 	if (!isup) {
7011 		netdev_info(vsi->netdev, "NIC Link is Down\n");
7012 		return;
7013 	}
7014 
7015 	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10Gbps
7017 	 */
7018 	if (pf->hw.func_caps.npar_enable &&
7019 	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7020 	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7021 		netdev_warn(vsi->netdev,
7022 			    "The partition detected link speed that is less than 10Gbps\n");
7023 
7024 	switch (pf->hw.phy.link_info.link_speed) {
7025 	case I40E_LINK_SPEED_40GB:
7026 		speed = "40 G";
7027 		break;
7028 	case I40E_LINK_SPEED_20GB:
7029 		speed = "20 G";
7030 		break;
7031 	case I40E_LINK_SPEED_25GB:
7032 		speed = "25 G";
7033 		break;
7034 	case I40E_LINK_SPEED_10GB:
7035 		speed = "10 G";
7036 		break;
7037 	case I40E_LINK_SPEED_5GB:
7038 		speed = "5 G";
7039 		break;
7040 	case I40E_LINK_SPEED_2_5GB:
7041 		speed = "2.5 G";
7042 		break;
7043 	case I40E_LINK_SPEED_1GB:
7044 		speed = "1000 M";
7045 		break;
7046 	case I40E_LINK_SPEED_100MB:
7047 		speed = "100 M";
7048 		break;
7049 	default:
7050 		break;
7051 	}
7052 
7053 	switch (pf->hw.fc.current_mode) {
7054 	case I40E_FC_FULL:
7055 		fc = "RX/TX";
7056 		break;
7057 	case I40E_FC_TX_PAUSE:
7058 		fc = "TX";
7059 		break;
7060 	case I40E_FC_RX_PAUSE:
7061 		fc = "RX";
7062 		break;
7063 	default:
7064 		fc = "None";
7065 		break;
7066 	}
7067 
7068 	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7069 		req_fec = "None";
7070 		fec = "None";
7071 		an = "False";
7072 
7073 		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7074 			an = "True";
7075 
7076 		if (pf->hw.phy.link_info.fec_info &
7077 		    I40E_AQ_CONFIG_FEC_KR_ENA)
7078 			fec = "CL74 FC-FEC/BASE-R";
7079 		else if (pf->hw.phy.link_info.fec_info &
7080 			 I40E_AQ_CONFIG_FEC_RS_ENA)
7081 			fec = "CL108 RS-FEC";
7082 
7083 		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
7084 		 * both RS and FC are requested
7085 		 */
7086 		if (vsi->back->hw.phy.link_info.req_fec_info &
7087 		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7088 			if (vsi->back->hw.phy.link_info.req_fec_info &
7089 			    I40E_AQ_REQUEST_FEC_RS)
7090 				req_fec = "CL108 RS-FEC";
7091 			else
7092 				req_fec = "CL74 FC-FEC/BASE-R";
7093 		}
7094 		netdev_info(vsi->netdev,
7095 			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7096 			    speed, req_fec, fec, an, fc);
7097 	} else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7098 		req_fec = "None";
7099 		fec = "None";
7100 		an = "False";
7101 
7102 		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7103 			an = "True";
7104 
7105 		if (pf->hw.phy.link_info.fec_info &
7106 		    I40E_AQ_CONFIG_FEC_KR_ENA)
7107 			fec = "CL74 FC-FEC/BASE-R";
7108 
7109 		if (pf->hw.phy.link_info.req_fec_info &
7110 		    I40E_AQ_REQUEST_FEC_KR)
7111 			req_fec = "CL74 FC-FEC/BASE-R";
7112 
7113 		netdev_info(vsi->netdev,
7114 			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7115 			    speed, req_fec, fec, an, fc);
7116 	} else {
7117 		netdev_info(vsi->netdev,
7118 			    "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7119 			    speed, fc);
7120 	}
7121 
7122 }
7123 
7124 /**
7125  * i40e_up_complete - Finish the last steps of bringing up a connection
7126  * @vsi: the VSI being configured
7127  **/
7128 static int i40e_up_complete(struct i40e_vsi *vsi)
7129 {
7130 	struct i40e_pf *pf = vsi->back;
7131 	int err;
7132 
7133 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7134 		i40e_vsi_configure_msix(vsi);
7135 	else
7136 		i40e_configure_msi_and_legacy(vsi);
7137 
7138 	/* start rings */
7139 	err = i40e_vsi_start_rings(vsi);
7140 	if (err)
7141 		return err;
7142 
7143 	clear_bit(__I40E_VSI_DOWN, vsi->state);
7144 	i40e_napi_enable_all(vsi);
7145 	i40e_vsi_enable_irq(vsi);
7146 
7147 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7148 	    (vsi->netdev)) {
7149 		i40e_print_link_message(vsi, true);
7150 		netif_tx_start_all_queues(vsi->netdev);
7151 		netif_carrier_on(vsi->netdev);
7152 	}
7153 
7154 	/* replay FDIR SB filters */
7155 	if (vsi->type == I40E_VSI_FDIR) {
7156 		/* reset fd counters */
7157 		pf->fd_add_err = 0;
7158 		pf->fd_atr_cnt = 0;
7159 		i40e_fdir_filter_restore(vsi);
7160 	}
7161 
	/* On the next run of the service_task, notify any clients of the
	 * newly opened netdev
7164 	 */
7165 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7166 	i40e_service_event_schedule(pf);
7167 
7168 	return 0;
7169 }
7170 
7171 /**
7172  * i40e_vsi_reinit_locked - Reset the VSI
7173  * @vsi: the VSI being configured
7174  *
7175  * Rebuild the ring structs after some configuration
7176  * has changed, e.g. MTU size.
7177  **/
7178 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7179 {
7180 	struct i40e_pf *pf = vsi->back;
7181 
7182 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7183 		usleep_range(1000, 2000);
7184 	i40e_down(vsi);
7185 
7186 	i40e_up(vsi);
7187 	clear_bit(__I40E_CONFIG_BUSY, pf->state);
7188 }
7189 
7190 /**
7191  * i40e_force_link_state - Force the link status
7192  * @pf: board private structure
7193  * @is_up: whether the link state should be forced up or down
7194  **/
7195 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7196 {
7197 	struct i40e_aq_get_phy_abilities_resp abilities;
7198 	struct i40e_aq_set_phy_config config = {0};
7199 	bool non_zero_phy_type = is_up;
7200 	struct i40e_hw *hw = &pf->hw;
7201 	i40e_status err;
7202 	u64 mask;
7203 	u8 speed;
7204 
	/* The card might have been put in an unstable state by other drivers
	 * and applications, which can cause incorrect speed values to be
	 * set on startup. In order to clear the speed registers, we call
	 * get_phy_capabilities twice: once to get the initial state of
	 * available speeds, and once to get the current PHY config.
	 */
7211 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7212 					   NULL);
7213 	if (err) {
7214 		dev_err(&pf->pdev->dev,
7215 			"failed to get phy cap., ret =  %s last_status =  %s\n",
7216 			i40e_stat_str(hw, err),
7217 			i40e_aq_str(hw, hw->aq.asq_last_status));
7218 		return err;
7219 	}
7220 	speed = abilities.link_speed;
7221 
7222 	/* Get the current phy config */
7223 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7224 					   NULL);
7225 	if (err) {
7226 		dev_err(&pf->pdev->dev,
7227 			"failed to get phy cap., ret =  %s last_status =  %s\n",
7228 			i40e_stat_str(hw, err),
7229 			i40e_aq_str(hw, hw->aq.asq_last_status));
7230 		return err;
7231 	}
7232 
	/* If link needs to go up, but was not forced to go down,
	 * and its speed values are OK, there is no need for a flap.
	 * If non_zero_phy_type was set, we still need to force the link up.
	 */
7237 	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7238 		non_zero_phy_type = true;
7239 	else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7240 		return I40E_SUCCESS;
7241 
7242 	/* To force link we need to set bits for all supported PHY types,
7243 	 * but there are now more than 32, so we need to split the bitmap
7244 	 * across two fields.
7245 	 */
7246 	mask = I40E_PHY_TYPES_BITMASK;
7247 	config.phy_type =
7248 		non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7249 	config.phy_type_ext =
7250 		non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
	/* Copy the old settings, except for phy_type */
7252 	config.abilities = abilities.abilities;
7253 	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7254 		if (is_up)
7255 			config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7256 		else
7257 			config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7258 	}
7259 	if (abilities.link_speed != 0)
7260 		config.link_speed = abilities.link_speed;
7261 	else
7262 		config.link_speed = speed;
7263 	config.eee_capability = abilities.eee_capability;
7264 	config.eeer = abilities.eeer_val;
7265 	config.low_power_ctrl = abilities.d3_lpan;
7266 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7267 			    I40E_AQ_PHY_FEC_CONFIG_MASK;
7268 	err = i40e_aq_set_phy_config(hw, &config, NULL);
7269 
7270 	if (err) {
7271 		dev_err(&pf->pdev->dev,
7272 			"set phy config ret =  %s last_status =  %s\n",
7273 			i40e_stat_str(&pf->hw, err),
7274 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7275 		return err;
7276 	}
7277 
7278 	/* Update the link info */
7279 	err = i40e_update_link_info(hw);
7280 	if (err) {
7281 		/* Wait a little bit (on 40G cards it sometimes takes a really
7282 		 * long time for link to come back from the atomic reset)
7283 		 * and try once more
7284 		 */
7285 		msleep(1000);
7286 		i40e_update_link_info(hw);
7287 	}
7288 
7289 	i40e_aq_set_link_restart_an(hw, is_up, NULL);
7290 
7291 	return I40E_SUCCESS;
7292 }
7293 
7294 /**
7295  * i40e_up - Bring the connection back up after being down
7296  * @vsi: the VSI being configured
7297  **/
7298 int i40e_up(struct i40e_vsi *vsi)
7299 {
7300 	int err;
7301 
7302 	if (vsi->type == I40E_VSI_MAIN &&
7303 	    (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7304 	     vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7305 		i40e_force_link_state(vsi->back, true);
7306 
7307 	err = i40e_vsi_configure(vsi);
7308 	if (!err)
7309 		err = i40e_up_complete(vsi);
7310 
7311 	return err;
7312 }
7313 
7314 /**
7315  * i40e_down - Shutdown the connection processing
7316  * @vsi: the VSI being stopped
7317  **/
7318 void i40e_down(struct i40e_vsi *vsi)
7319 {
7320 	int i;
7321 
7322 	/* It is assumed that the caller of this function
7323 	 * sets the vsi->state __I40E_VSI_DOWN bit.
7324 	 */
7325 	if (vsi->netdev) {
7326 		netif_carrier_off(vsi->netdev);
7327 		netif_tx_disable(vsi->netdev);
7328 	}
7329 	i40e_vsi_disable_irq(vsi);
7330 	i40e_vsi_stop_rings(vsi);
7331 	if (vsi->type == I40E_VSI_MAIN &&
7332 	   (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7333 	    vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7334 		i40e_force_link_state(vsi->back, false);
7335 	i40e_napi_disable_all(vsi);
7336 
7337 	for (i = 0; i < vsi->num_queue_pairs; i++) {
7338 		i40e_clean_tx_ring(vsi->tx_rings[i]);
7339 		if (i40e_enabled_xdp_vsi(vsi)) {
7340 			/* Make sure that in-progress ndo_xdp_xmit and
7341 			 * ndo_xsk_wakeup calls are completed.
7342 			 */
7343 			synchronize_rcu();
7344 			i40e_clean_tx_ring(vsi->xdp_rings[i]);
7345 		}
7346 		i40e_clean_rx_ring(vsi->rx_rings[i]);
7347 	}
7348 
7349 }
7350 
7351 /**
7352  * i40e_validate_mqprio_qopt- validate queue mapping info
7353  * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
7355  **/
7356 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7357 				     struct tc_mqprio_qopt_offload *mqprio_qopt)
7358 {
7359 	u64 sum_max_rate = 0;
7360 	u64 max_rate = 0;
7361 	int i;
7362 
7363 	if (mqprio_qopt->qopt.offset[0] != 0 ||
7364 	    mqprio_qopt->qopt.num_tc < 1 ||
7365 	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7366 		return -EINVAL;
7367 	for (i = 0; ; i++) {
7368 		if (!mqprio_qopt->qopt.count[i])
7369 			return -EINVAL;
7370 		if (mqprio_qopt->min_rate[i]) {
7371 			dev_err(&vsi->back->pdev->dev,
7372 				"Invalid min tx rate (greater than 0) specified\n");
7373 			return -EINVAL;
7374 		}
7375 		max_rate = mqprio_qopt->max_rate[i];
7376 		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7377 		sum_max_rate += max_rate;
7378 
7379 		if (i >= mqprio_qopt->qopt.num_tc - 1)
7380 			break;
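		/* TC queue ranges must be contiguous: each TC has to start
		 * right where the previous TC's queues end.
		 */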
7381 		if (mqprio_qopt->qopt.offset[i + 1] !=
7382 		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7383 			return -EINVAL;
7384 	}
7385 	if (vsi->num_queue_pairs <
7386 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7387 		dev_err(&vsi->back->pdev->dev,
7388 			"Failed to create traffic channel, insufficient number of queues.\n");
7389 		return -EINVAL;
7390 	}
7391 	if (sum_max_rate > i40e_get_link_speed(vsi)) {
7392 		dev_err(&vsi->back->pdev->dev,
7393 			"Invalid max tx rate specified\n");
7394 		return -EINVAL;
7395 	}
7396 	return 0;
7397 }
7398 
7399 /**
7400  * i40e_vsi_set_default_tc_config - set default values for tc configuration
7401  * @vsi: the VSI being configured
7402  **/
7403 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7404 {
7405 	u16 qcount;
7406 	int i;
7407 
7408 	/* Only TC0 is enabled */
7409 	vsi->tc_config.numtc = 1;
7410 	vsi->tc_config.enabled_tc = 1;
7411 	qcount = min_t(int, vsi->alloc_queue_pairs,
7412 		       i40e_pf_get_max_q_per_tc(vsi->back));
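	/* TC0 gets as many queues as possible, bounded by both the queues
	 * allocated to this VSI and the per-TC limit supported by the PF.
	 */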
7413 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* For TCs that are not enabled, set the offset to the
		 * default queue and allocate a single queue.
		 */
7417 		vsi->tc_config.tc_info[i].qoffset = 0;
7418 		if (i == 0)
7419 			vsi->tc_config.tc_info[i].qcount = qcount;
7420 		else
7421 			vsi->tc_config.tc_info[i].qcount = 1;
7422 		vsi->tc_config.tc_info[i].netdev_tc = 0;
7423 	}
7424 }
7425 
7426 /**
7427  * i40e_del_macvlan_filter
7428  * @hw: pointer to the HW structure
7429  * @seid: seid of the channel VSI
7430  * @macaddr: the mac address to apply as a filter
7431  * @aq_err: store the admin Q error
7432  *
7433  * This function deletes a mac filter on the channel VSI which serves as the
7434  * macvlan. Returns 0 on success.
7435  **/
7436 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7437 					   const u8 *macaddr, int *aq_err)
7438 {
7439 	struct i40e_aqc_remove_macvlan_element_data element;
7440 	i40e_status status;
7441 
7442 	memset(&element, 0, sizeof(element));
7443 	ether_addr_copy(element.mac_addr, macaddr);
7444 	element.vlan_tag = 0;
7445 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7446 	status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7447 	*aq_err = hw->aq.asq_last_status;
7448 
7449 	return status;
7450 }
7451 
7452 /**
7453  * i40e_add_macvlan_filter
7454  * @hw: pointer to the HW structure
7455  * @seid: seid of the channel VSI
7456  * @macaddr: the mac address to apply as a filter
7457  * @aq_err: store the admin Q error
7458  *
7459  * This function adds a mac filter on the channel VSI which serves as the
7460  * macvlan. Returns 0 on success.
7461  **/
7462 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7463 					   const u8 *macaddr, int *aq_err)
7464 {
7465 	struct i40e_aqc_add_macvlan_element_data element;
7466 	i40e_status status;
7467 	u16 cmd_flags = 0;
7468 
7469 	ether_addr_copy(element.mac_addr, macaddr);
7470 	element.vlan_tag = 0;
7471 	element.queue_number = 0;
7472 	element.match_method = I40E_AQC_MM_ERR_NO_RES;
7473 	cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7474 	element.flags = cpu_to_le16(cmd_flags);
7475 	status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7476 	*aq_err = hw->aq.asq_last_status;
7477 
7478 	return status;
7479 }
7480 
7481 /**
7482  * i40e_reset_ch_rings - Reset the queue contexts in a channel
7483  * @vsi: the VSI we want to access
7484  * @ch: the channel we want to access
7485  */
7486 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7487 {
7488 	struct i40e_ring *tx_ring, *rx_ring;
7489 	u16 pf_q;
7490 	int i;
7491 
7492 	for (i = 0; i < ch->num_queue_pairs; i++) {
7493 		pf_q = ch->base_queue + i;
7494 		tx_ring = vsi->tx_rings[pf_q];
7495 		tx_ring->ch = NULL;
7496 		rx_ring = vsi->rx_rings[pf_q];
7497 		rx_ring->ch = NULL;
7498 	}
7499 }
7500 
7501 /**
7502  * i40e_free_macvlan_channels
7503  * @vsi: the VSI we want to access
7504  *
7505  * This function frees the Qs of the channel VSI from
7506  * the stack and also deletes the channel VSIs which
7507  * serve as macvlans.
7508  */
7509 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7510 {
7511 	struct i40e_channel *ch, *ch_tmp;
7512 	int ret;
7513 
7514 	if (list_empty(&vsi->macvlan_list))
7515 		return;
7516 
7517 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7518 		struct i40e_vsi *parent_vsi;
7519 
7520 		if (i40e_is_channel_macvlan(ch)) {
7521 			i40e_reset_ch_rings(vsi, ch);
7522 			clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7523 			netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7524 			netdev_set_sb_channel(ch->fwd->netdev, 0);
7525 			kfree(ch->fwd);
7526 			ch->fwd = NULL;
7527 		}
7528 
7529 		list_del(&ch->list);
7530 		parent_vsi = ch->parent_vsi;
7531 		if (!parent_vsi || !ch->initialized) {
7532 			kfree(ch);
7533 			continue;
7534 		}
7535 
7536 		/* remove the VSI */
7537 		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7538 					     NULL);
7539 		if (ret)
7540 			dev_err(&vsi->back->pdev->dev,
7541 				"unable to remove channel (%d) for parent VSI(%d)\n",
7542 				ch->seid, parent_vsi->seid);
7543 		kfree(ch);
7544 	}
7545 	vsi->macvlan_cnt = 0;
7546 }
7547 
7548 /**
7549  * i40e_fwd_ring_up - bring the macvlan device up
7550  * @vsi: the VSI we want to access
7551  * @vdev: macvlan netdevice
7552  * @fwd: the private fwd structure
7553  */
7554 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7555 			    struct i40e_fwd_adapter *fwd)
7556 {
7557 	struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7558 	int ret = 0, num_tc = 1,  i, aq_err;
7559 	struct i40e_pf *pf = vsi->back;
7560 	struct i40e_hw *hw = &pf->hw;
7561 
7562 	/* Go through the list and find an available channel */
7563 	list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7564 		if (!i40e_is_channel_macvlan(iter)) {
7565 			iter->fwd = fwd;
7566 			/* record configuration for macvlan interface in vdev */
7567 			for (i = 0; i < num_tc; i++)
7568 				netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7569 							     i,
7570 							     iter->num_queue_pairs,
7571 							     iter->base_queue);
7572 			for (i = 0; i < iter->num_queue_pairs; i++) {
7573 				struct i40e_ring *tx_ring, *rx_ring;
7574 				u16 pf_q;
7575 
7576 				pf_q = iter->base_queue + i;
7577 
7578 				/* Get to TX ring ptr */
7579 				tx_ring = vsi->tx_rings[pf_q];
7580 				tx_ring->ch = iter;
7581 
7582 				/* Get the RX ring ptr */
7583 				rx_ring = vsi->rx_rings[pf_q];
7584 				rx_ring->ch = iter;
7585 			}
7586 			ch = iter;
7587 			break;
7588 		}
7589 	}
7590 
7591 	if (!ch)
7592 		return -EINVAL;
7593 
7594 	/* Guarantee all rings are updated before we update the
7595 	 * MAC address filter.
7596 	 */
7597 	wmb();
7598 
7599 	/* Add a mac filter */
7600 	ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7601 	if (ret) {
7602 		/* if we cannot add the MAC rule then disable the offload */
7603 		macvlan_release_l2fw_offload(vdev);
7604 		for (i = 0; i < ch->num_queue_pairs; i++) {
7605 			struct i40e_ring *rx_ring;
7606 			u16 pf_q;
7607 
7608 			pf_q = ch->base_queue + i;
7609 			rx_ring = vsi->rx_rings[pf_q];
7610 			rx_ring->netdev = NULL;
7611 		}
7612 		dev_info(&pf->pdev->dev,
7613 			 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7614 			  i40e_stat_str(hw, ret),
7615 			  i40e_aq_str(hw, aq_err));
		netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7617 	}
7618 
7619 	return ret;
7620 }
7621 
7622 /**
7623  * i40e_setup_macvlans - create the channels which will be macvlans
7624  * @vsi: the VSI we want to access
7625  * @macvlan_cnt: no. of macvlans to be setup
7626  * @qcnt: no. of Qs per macvlan
7627  * @vdev: macvlan netdevice
7628  */
7629 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7630 			       struct net_device *vdev)
7631 {
7632 	struct i40e_pf *pf = vsi->back;
7633 	struct i40e_hw *hw = &pf->hw;
7634 	struct i40e_vsi_context ctxt;
7635 	u16 sections, qmap, num_qps;
7636 	struct i40e_channel *ch;
7637 	int i, pow, ret = 0;
7638 	u8 offset = 0;
7639 
7640 	if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7641 		return -EINVAL;
7642 
7643 	num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7644 
7645 	/* find the next higher power-of-2 of num queue pairs */
7646 	pow = fls(roundup_pow_of_two(num_qps) - 1);
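	/* Illustrative example: with num_qps = 12, roundup_pow_of_two()
	 * yields 16 and fls(15) = 4, so the qmap below encodes 2^4 = 16
	 * queues starting at offset 0.
	 */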
7647 
7648 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7649 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7650 
7651 	/* Setup context bits for the main VSI */
7652 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7653 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7654 	memset(&ctxt, 0, sizeof(ctxt));
7655 	ctxt.seid = vsi->seid;
7656 	ctxt.pf_num = vsi->back->hw.pf_id;
7657 	ctxt.vf_num = 0;
7658 	ctxt.uplink_seid = vsi->uplink_seid;
7659 	ctxt.info = vsi->info;
7660 	ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7661 	ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7662 	ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7663 	ctxt.info.valid_sections |= cpu_to_le16(sections);
7664 
7665 	/* Reconfigure RSS for main VSI with new max queue count */
7666 	vsi->rss_size = max_t(u16, num_qps, qcnt);
7667 	ret = i40e_vsi_config_rss(vsi);
7668 	if (ret) {
7669 		dev_info(&pf->pdev->dev,
7670 			 "Failed to reconfig RSS for num_queues (%u)\n",
7671 			 vsi->rss_size);
7672 		return ret;
7673 	}
7674 	vsi->reconfig_rss = true;
7675 	dev_dbg(&vsi->back->pdev->dev,
7676 		"Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7677 	vsi->next_base_queue = num_qps;
7678 	vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7679 
7680 	/* Update the VSI after updating the VSI queue-mapping
7681 	 * information
7682 	 */
7683 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7684 	if (ret) {
7685 		dev_info(&pf->pdev->dev,
7686 			 "Update vsi tc config failed, err %s aq_err %s\n",
7687 			 i40e_stat_str(hw, ret),
7688 			 i40e_aq_str(hw, hw->aq.asq_last_status));
7689 		return ret;
7690 	}
7691 	/* update the local VSI info with updated queue map */
7692 	i40e_vsi_update_queue_map(vsi, &ctxt);
7693 	vsi->info.valid_sections = 0;
7694 
7695 	/* Create channels for macvlans */
7696 	INIT_LIST_HEAD(&vsi->macvlan_list);
7697 	for (i = 0; i < macvlan_cnt; i++) {
7698 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7699 		if (!ch) {
7700 			ret = -ENOMEM;
7701 			goto err_free;
7702 		}
7703 		INIT_LIST_HEAD(&ch->list);
7704 		ch->num_queue_pairs = qcnt;
7705 		if (!i40e_setup_channel(pf, vsi, ch)) {
7706 			ret = -EINVAL;
7707 			kfree(ch);
7708 			goto err_free;
7709 		}
7710 		ch->parent_vsi = vsi;
7711 		vsi->cnt_q_avail -= ch->num_queue_pairs;
7712 		vsi->macvlan_cnt++;
7713 		list_add_tail(&ch->list, &vsi->macvlan_list);
7714 	}
7715 
7716 	return ret;
7717 
7718 err_free:
7719 	dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7720 	i40e_free_macvlan_channels(vsi);
7721 
7722 	return ret;
7723 }
7724 
7725 /**
7726  * i40e_fwd_add - configure macvlans
7727  * @netdev: net device to configure
7728  * @vdev: macvlan netdevice
7729  **/
7730 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7731 {
7732 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7733 	u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7734 	struct i40e_vsi *vsi = np->vsi;
7735 	struct i40e_pf *pf = vsi->back;
7736 	struct i40e_fwd_adapter *fwd;
7737 	int avail_macvlan, ret;
7738 
7739 	if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7740 		netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7741 		return ERR_PTR(-EINVAL);
7742 	}
7743 	if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7744 		netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7745 		return ERR_PTR(-EINVAL);
7746 	}
7747 	if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7748 		netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7749 		return ERR_PTR(-EINVAL);
7750 	}
7751 
7752 	/* The macvlan device has to be a single Q device so that the
7753 	 * tc_to_txq field can be reused to pick the tx queue.
7754 	 */
7755 	if (netif_is_multiqueue(vdev))
7756 		return ERR_PTR(-ERANGE);
7757 
7758 	if (!vsi->macvlan_cnt) {
7759 		/* reserve bit 0 for the pf device */
7760 		set_bit(0, vsi->fwd_bitmask);
7761 
		/* Try to reserve as many queues as possible for macvlans. First
		 * reserve 3/4th of the max vectors, then half, then a quarter,
		 * calculating the number of Qs per macvlan as we go.
		 */
7766 		vectors = pf->num_lan_msix;
7767 		if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
			/* allocate 4 Qs per macvlan and 32 Qs to the PF */
7769 			q_per_macvlan = 4;
7770 			macvlan_cnt = (vectors - 32) / 4;
7771 		} else if (vectors <= 64 && vectors > 32) {
			/* allocate 2 Qs per macvlan and 16 Qs to the PF */
7773 			q_per_macvlan = 2;
7774 			macvlan_cnt = (vectors - 16) / 2;
7775 		} else if (vectors <= 32 && vectors > 16) {
			/* allocate 1 Q per macvlan and 16 Qs to the PF */
7777 			q_per_macvlan = 1;
7778 			macvlan_cnt = vectors - 16;
7779 		} else if (vectors <= 16 && vectors > 8) {
7780 			/* allocate 1 Q per macvlan and 8 Qs to the PF */
7781 			q_per_macvlan = 1;
7782 			macvlan_cnt = vectors - 8;
7783 		} else {
7784 			/* allocate 1 Q per macvlan and 1 Q to the PF */
7785 			q_per_macvlan = 1;
7786 			macvlan_cnt = vectors - 1;
7787 		}
7788 
7789 		if (macvlan_cnt == 0)
7790 			return ERR_PTR(-EBUSY);
7791 
7792 		/* Quiesce VSI queues */
7793 		i40e_quiesce_vsi(vsi);
7794 
7795 		/* sets up the macvlans but does not "enable" them */
7796 		ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7797 					  vdev);
7798 		if (ret)
7799 			return ERR_PTR(ret);
7800 
7801 		/* Unquiesce VSI */
7802 		i40e_unquiesce_vsi(vsi);
7803 	}
7804 	avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7805 					    vsi->macvlan_cnt);
7806 	if (avail_macvlan >= I40E_MAX_MACVLANS)
7807 		return ERR_PTR(-EBUSY);
7808 
7809 	/* create the fwd struct */
7810 	fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7811 	if (!fwd)
7812 		return ERR_PTR(-ENOMEM);
7813 
7814 	set_bit(avail_macvlan, vsi->fwd_bitmask);
7815 	fwd->bit_no = avail_macvlan;
7816 	netdev_set_sb_channel(vdev, avail_macvlan);
7817 	fwd->netdev = vdev;
7818 
7819 	if (!netif_running(netdev))
7820 		return fwd;
7821 
7822 	/* Set fwd ring up */
7823 	ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7824 	if (ret) {
7825 		/* unbind the queues and drop the subordinate channel config */
7826 		netdev_unbind_sb_channel(netdev, vdev);
7827 		netdev_set_sb_channel(vdev, 0);
7828 
7829 		kfree(fwd);
7830 		return ERR_PTR(-EINVAL);
7831 	}
7832 
7833 	return fwd;
7834 }
7835 
7836 /**
7837  * i40e_del_all_macvlans - Delete all the mac filters on the channels
7838  * @vsi: the VSI we want to access
7839  */
7840 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7841 {
7842 	struct i40e_channel *ch, *ch_tmp;
7843 	struct i40e_pf *pf = vsi->back;
7844 	struct i40e_hw *hw = &pf->hw;
7845 	int aq_err, ret = 0;
7846 
7847 	if (list_empty(&vsi->macvlan_list))
7848 		return;
7849 
7850 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7851 		if (i40e_is_channel_macvlan(ch)) {
7852 			ret = i40e_del_macvlan_filter(hw, ch->seid,
7853 						      i40e_channel_mac(ch),
7854 						      &aq_err);
7855 			if (!ret) {
7856 				/* Reset queue contexts */
7857 				i40e_reset_ch_rings(vsi, ch);
7858 				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7859 				netdev_unbind_sb_channel(vsi->netdev,
7860 							 ch->fwd->netdev);
7861 				netdev_set_sb_channel(ch->fwd->netdev, 0);
7862 				kfree(ch->fwd);
7863 				ch->fwd = NULL;
7864 			}
7865 		}
7866 	}
7867 }
7868 
7869 /**
7870  * i40e_fwd_del - delete macvlan interfaces
7871  * @netdev: net device to configure
7872  * @vdev: macvlan netdevice
7873  */
7874 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7875 {
7876 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7877 	struct i40e_fwd_adapter *fwd = vdev;
7878 	struct i40e_channel *ch, *ch_tmp;
7879 	struct i40e_vsi *vsi = np->vsi;
7880 	struct i40e_pf *pf = vsi->back;
7881 	struct i40e_hw *hw = &pf->hw;
7882 	int aq_err, ret = 0;
7883 
7884 	/* Find the channel associated with the macvlan and del mac filter */
7885 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7886 		if (i40e_is_channel_macvlan(ch) &&
7887 		    ether_addr_equal(i40e_channel_mac(ch),
7888 				     fwd->netdev->dev_addr)) {
7889 			ret = i40e_del_macvlan_filter(hw, ch->seid,
7890 						      i40e_channel_mac(ch),
7891 						      &aq_err);
7892 			if (!ret) {
7893 				/* Reset queue contexts */
7894 				i40e_reset_ch_rings(vsi, ch);
7895 				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7896 				netdev_unbind_sb_channel(netdev, fwd->netdev);
7897 				netdev_set_sb_channel(fwd->netdev, 0);
7898 				kfree(ch->fwd);
7899 				ch->fwd = NULL;
7900 			} else {
7901 				dev_info(&pf->pdev->dev,
7902 					 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7903 					  i40e_stat_str(hw, ret),
7904 					  i40e_aq_str(hw, aq_err));
7905 			}
7906 			break;
7907 		}
7908 	}
7909 }
7910 
7911 /**
7912  * i40e_setup_tc - configure multiple traffic classes
7913  * @netdev: net device to configure
7914  * @type_data: tc offload data
7915  **/
7916 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7917 {
7918 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7919 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7920 	struct i40e_vsi *vsi = np->vsi;
7921 	struct i40e_pf *pf = vsi->back;
7922 	u8 enabled_tc = 0, num_tc, hw;
7923 	bool need_reset = false;
7924 	int old_queue_pairs;
7925 	int ret = -EINVAL;
7926 	u16 mode;
7927 	int i;
7928 
7929 	old_queue_pairs = vsi->num_queue_pairs;
7930 	num_tc = mqprio_qopt->qopt.num_tc;
7931 	hw = mqprio_qopt->qopt.hw;
7932 	mode = mqprio_qopt->mode;
7933 	if (!hw) {
7934 		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7935 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7936 		goto config_tc;
7937 	}
7938 
7939 	/* Check if MFP enabled */
7940 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7941 		netdev_info(netdev,
7942 			    "Configuring TC not supported in MFP mode\n");
7943 		return ret;
7944 	}
7945 	switch (mode) {
7946 	case TC_MQPRIO_MODE_DCB:
7947 		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7948 
7949 		/* Check if DCB enabled to continue */
7950 		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7951 			netdev_info(netdev,
7952 				    "DCB is not enabled for adapter\n");
7953 			return ret;
7954 		}
7955 
7956 		/* Check whether tc count is within enabled limit */
7957 		if (num_tc > i40e_pf_get_num_tc(pf)) {
7958 			netdev_info(netdev,
7959 				    "TC count greater than enabled on link for adapter\n");
7960 			return ret;
7961 		}
7962 		break;
7963 	case TC_MQPRIO_MODE_CHANNEL:
7964 		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7965 			netdev_info(netdev,
7966 				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7967 			return ret;
7968 		}
7969 		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7970 			return ret;
7971 		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7972 		if (ret)
7973 			return ret;
7974 		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7975 		       sizeof(*mqprio_qopt));
7976 		pf->flags |= I40E_FLAG_TC_MQPRIO;
7977 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7978 		break;
7979 	default:
7980 		return -EINVAL;
7981 	}
7982 
7983 config_tc:
7984 	/* Generate TC map for number of tc requested */
7985 	for (i = 0; i < num_tc; i++)
7986 		enabled_tc |= BIT(i);
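	/* e.g. num_tc = 3 yields enabled_tc = 0x7, i.e. TC0-TC2 enabled */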
7987 
7988 	/* Requesting same TC configuration as already enabled */
7989 	if (enabled_tc == vsi->tc_config.enabled_tc &&
7990 	    mode != TC_MQPRIO_MODE_CHANNEL)
7991 		return 0;
7992 
7993 	/* Quiesce VSI queues */
7994 	i40e_quiesce_vsi(vsi);
7995 
7996 	if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7997 		i40e_remove_queue_channels(vsi);
7998 
7999 	/* Configure VSI for enabled TCs */
8000 	ret = i40e_vsi_config_tc(vsi, enabled_tc);
8001 	if (ret) {
8002 		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
8003 			    vsi->seid);
8004 		need_reset = true;
8005 		goto exit;
8006 	} else if (enabled_tc &&
8007 		   (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8008 		netdev_info(netdev,
8009 			    "Failed to create channel. Override queues (%u) not power of 2\n",
8010 			    vsi->tc_config.tc_info[0].qcount);
8011 		ret = -EINVAL;
8012 		need_reset = true;
8013 		goto exit;
8014 	}
8015 
8016 	dev_info(&vsi->back->pdev->dev,
8017 		 "Setup channel (id:%u) utilizing num_queues %d\n",
8018 		 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8019 
8020 	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
8021 		if (vsi->mqprio_qopt.max_rate[0]) {
8022 			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8023 
8024 			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
8025 			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8026 			if (!ret) {
8027 				u64 credits = max_tx_rate;
8028 
8029 				do_div(credits, I40E_BW_CREDIT_DIVISOR);
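				/* credits are expressed in 50 Mbps units,
				 * e.g. a 500 Mbps cap is 10 credits (see the
				 * dev_dbg() format string below)
				 */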
8030 				dev_dbg(&vsi->back->pdev->dev,
8031 					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8032 					max_tx_rate,
8033 					credits,
8034 					vsi->seid);
8035 			} else {
8036 				need_reset = true;
8037 				goto exit;
8038 			}
8039 		}
8040 		ret = i40e_configure_queue_channels(vsi);
8041 		if (ret) {
8042 			vsi->num_queue_pairs = old_queue_pairs;
8043 			netdev_info(netdev,
8044 				    "Failed configuring queue channels\n");
8045 			need_reset = true;
8046 			goto exit;
8047 		}
8048 	}
8049 
8050 exit:
8051 	/* Reset the configuration data to defaults, only TC0 is enabled */
8052 	if (need_reset) {
8053 		i40e_vsi_set_default_tc_config(vsi);
8054 		need_reset = false;
8055 	}
8056 
8057 	/* Unquiesce VSI */
8058 	i40e_unquiesce_vsi(vsi);
8059 	return ret;
8060 }
8061 
8062 /**
8063  * i40e_set_cld_element - sets cloud filter element data
8064  * @filter: cloud filter rule
8065  * @cld: ptr to cloud filter element data
8066  *
 * This is a helper function to copy data into the cloud filter element
8068  **/
8069 static inline void
8070 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8071 		     struct i40e_aqc_cloud_filters_element_data *cld)
8072 {
8073 	u32 ipa;
8074 	int i;
8075 
8076 	memset(cld, 0, sizeof(*cld));
8077 	ether_addr_copy(cld->outer_mac, filter->dst_mac);
8078 	ether_addr_copy(cld->inner_mac, filter->src_mac);
8079 
8080 	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8081 		return;
8082 
8083 	if (filter->n_proto == ETH_P_IPV6) {
8084 #define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
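		/* dst_ipv6 holds big-endian 32-bit words; the AQ element
		 * expects little-endian data with the word order reversed,
		 * so the last word of dst_ipv6 is written first into
		 * raw_v6.data.
		 */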
8085 		for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8086 			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8087 
8088 			*(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8089 		}
8090 	} else {
8091 		ipa = be32_to_cpu(filter->dst_ipv4);
8092 
8093 		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8094 	}
8095 
8096 	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
8097 
	/* tenant_id is not supported by FW now. Once the support is enabled,
	 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id).
	 */
8101 	if (filter->tenant_id)
8102 		return;
8103 }
8104 
8105 /**
8106  * i40e_add_del_cloud_filter - Add/del cloud filter
8107  * @vsi: pointer to VSI
8108  * @filter: cloud filter rule
8109  * @add: if true, add, if false, delete
8110  *
8111  * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added.
8113  **/
8114 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8115 			      struct i40e_cloud_filter *filter, bool add)
8116 {
8117 	struct i40e_aqc_cloud_filters_element_data cld_filter;
8118 	struct i40e_pf *pf = vsi->back;
8119 	int ret;
8120 	static const u16 flag_table[128] = {
8121 		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
8122 			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8123 		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
8124 			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8125 		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
8126 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8127 		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8128 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8129 		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8130 			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8131 		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8132 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8133 		[I40E_CLOUD_FILTER_FLAGS_IIP] =
8134 			I40E_AQC_ADD_CLOUD_FILTER_IIP,
8135 	};
8136 
8137 	if (filter->flags >= ARRAY_SIZE(flag_table))
8138 		return I40E_ERR_CONFIG;
8139 
8140 	memset(&cld_filter, 0, sizeof(cld_filter));
8141 
8142 	/* copy element needed to add cloud filter from filter */
8143 	i40e_set_cld_element(filter, &cld_filter);
8144 
8145 	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8146 		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8147 					     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8148 
8149 	if (filter->n_proto == ETH_P_IPV6)
8150 		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8151 						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8152 	else
8153 		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8154 						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8155 
8156 	if (add)
8157 		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8158 						&cld_filter, 1);
8159 	else
8160 		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8161 						&cld_filter, 1);
8162 	if (ret)
8163 		dev_dbg(&pf->pdev->dev,
8164 			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8165 			add ? "add" : "delete", filter->dst_port, ret,
8166 			pf->hw.aq.asq_last_status);
8167 	else
8168 		dev_info(&pf->pdev->dev,
8169 			 "%s cloud filter for VSI: %d\n",
8170 			 add ? "Added" : "Deleted", filter->seid);
8171 	return ret;
8172 }
8173 
8174 /**
8175  * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8176  * @vsi: pointer to VSI
8177  * @filter: cloud filter rule
8178  * @add: if true, add, if false, delete
8179  *
8180  * Add or delete a cloud filter for a specific flow spec using big buffer.
 * Returns 0 if the filter was successfully added.
8182  **/
8183 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8184 				      struct i40e_cloud_filter *filter,
8185 				      bool add)
8186 {
8187 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
8188 	struct i40e_pf *pf = vsi->back;
8189 	int ret;
8190 
8191 	/* Both (src/dst) valid mac_addr are not supported */
8192 	if ((is_valid_ether_addr(filter->dst_mac) &&
8193 	     is_valid_ether_addr(filter->src_mac)) ||
8194 	    (is_multicast_ether_addr(filter->dst_mac) &&
8195 	     is_multicast_ether_addr(filter->src_mac)))
8196 		return -EOPNOTSUPP;
8197 
8198 	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8199 	 * ports are not supported via big buffer now.
8200 	 */
8201 	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8202 		return -EOPNOTSUPP;
8203 
8204 	/* adding filter using src_port/src_ip is not supported at this stage */
8205 	if (filter->src_port ||
8206 	    (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8207 	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
8208 		return -EOPNOTSUPP;
8209 
8210 	memset(&cld_filter, 0, sizeof(cld_filter));
8211 
8212 	/* copy element needed to add cloud filter from filter */
8213 	i40e_set_cld_element(filter, &cld_filter.element);
8214 
8215 	if (is_valid_ether_addr(filter->dst_mac) ||
8216 	    is_valid_ether_addr(filter->src_mac) ||
8217 	    is_multicast_ether_addr(filter->dst_mac) ||
8218 	    is_multicast_ether_addr(filter->src_mac)) {
8219 		/* MAC + IP : unsupported mode */
8220 		if (filter->dst_ipv4)
8221 			return -EOPNOTSUPP;
8222 
		/* Since we have already validated that the L4 port is
		 * non-zero, start with the MAC + PORT "flags" value and
		 * switch to MAC + VLAN + PORT if a VLAN is present.
		 */
8227 		cld_filter.element.flags =
8228 			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8229 
8230 		if (filter->vlan_id) {
8231 			cld_filter.element.flags =
8232 			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8233 		}
8234 
8235 	} else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8236 		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8237 		cld_filter.element.flags =
8238 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8239 		if (filter->n_proto == ETH_P_IPV6)
8240 			cld_filter.element.flags |=
8241 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8242 		else
8243 			cld_filter.element.flags |=
8244 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8245 	} else {
8246 		dev_err(&pf->pdev->dev,
8247 			"either mac or ip has to be valid for cloud filter\n");
8248 		return -EINVAL;
8249 	}
8250 
8251 	/* Now copy L4 port in Byte 6..7 in general fields */
8252 	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8253 						be16_to_cpu(filter->dst_port);
8254 
8255 	if (add) {
8256 		/* Validate current device switch mode, change if necessary */
8257 		ret = i40e_validate_and_set_switch_mode(vsi);
8258 		if (ret) {
8259 			dev_err(&pf->pdev->dev,
8260 				"failed to set switch mode, ret %d\n",
8261 				ret);
8262 			return ret;
8263 		}
8264 
8265 		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8266 						   &cld_filter, 1);
8267 	} else {
8268 		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8269 						   &cld_filter, 1);
8270 	}
8271 
8272 	if (ret)
8273 		dev_dbg(&pf->pdev->dev,
8274 			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8275 			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8276 	else
8277 		dev_info(&pf->pdev->dev,
8278 			 "%s cloud filter for VSI: %d, L4 port: %d\n",
8279 			 add ? "add" : "delete", filter->seid,
8280 			 ntohs(filter->dst_port));
8281 	return ret;
8282 }
8283 
8284 /**
8285  * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8286  * @vsi: Pointer to VSI
8287  * @f: Pointer to struct flow_cls_offload
8288  * @filter: Pointer to cloud filter structure
8289  *
8290  **/
8291 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8292 				 struct flow_cls_offload *f,
8293 				 struct i40e_cloud_filter *filter)
8294 {
8295 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8296 	struct flow_dissector *dissector = rule->match.dissector;
8297 	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8298 	struct i40e_pf *pf = vsi->back;
8299 	u8 field_flags = 0;
8300 
8301 	if (dissector->used_keys &
8302 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8303 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
8304 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8305 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
8306 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8307 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8308 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
8309 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8310 		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8311 			dissector->used_keys);
8312 		return -EOPNOTSUPP;
8313 	}
8314 
8315 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8316 		struct flow_match_enc_keyid match;
8317 
8318 		flow_rule_match_enc_keyid(rule, &match);
8319 		if (match.mask->keyid != 0)
8320 			field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8321 
8322 		filter->tenant_id = be32_to_cpu(match.key->keyid);
8323 	}
8324 
8325 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8326 		struct flow_match_basic match;
8327 
8328 		flow_rule_match_basic(rule, &match);
8329 		n_proto_key = ntohs(match.key->n_proto);
8330 		n_proto_mask = ntohs(match.mask->n_proto);
8331 
8332 		if (n_proto_key == ETH_P_ALL) {
8333 			n_proto_key = 0;
8334 			n_proto_mask = 0;
8335 		}
8336 		filter->n_proto = n_proto_key & n_proto_mask;
8337 		filter->ip_proto = match.key->ip_proto;
8338 	}
8339 
8340 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8341 		struct flow_match_eth_addrs match;
8342 
8343 		flow_rule_match_eth_addrs(rule, &match);
8344 
		/* use is_broadcast and is_zero to check for all-0xff or all-0 masks */
8346 		if (!is_zero_ether_addr(match.mask->dst)) {
8347 			if (is_broadcast_ether_addr(match.mask->dst)) {
8348 				field_flags |= I40E_CLOUD_FIELD_OMAC;
8349 			} else {
8350 				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8351 					match.mask->dst);
8352 				return I40E_ERR_CONFIG;
8353 			}
8354 		}
8355 
8356 		if (!is_zero_ether_addr(match.mask->src)) {
8357 			if (is_broadcast_ether_addr(match.mask->src)) {
8358 				field_flags |= I40E_CLOUD_FIELD_IMAC;
8359 			} else {
8360 				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8361 					match.mask->src);
8362 				return I40E_ERR_CONFIG;
8363 			}
8364 		}
8365 		ether_addr_copy(filter->dst_mac, match.key->dst);
8366 		ether_addr_copy(filter->src_mac, match.key->src);
8367 	}
8368 
8369 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8370 		struct flow_match_vlan match;
8371 
8372 		flow_rule_match_vlan(rule, &match);
8373 		if (match.mask->vlan_id) {
8374 			if (match.mask->vlan_id == VLAN_VID_MASK) {
8375 				field_flags |= I40E_CLOUD_FIELD_IVLAN;
8376 
8377 			} else {
8378 				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8379 					match.mask->vlan_id);
8380 				return I40E_ERR_CONFIG;
8381 			}
8382 		}
8383 
8384 		filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8385 	}
8386 
8387 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8388 		struct flow_match_control match;
8389 
8390 		flow_rule_match_control(rule, &match);
8391 		addr_type = match.key->addr_type;
8392 	}
8393 
8394 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8395 		struct flow_match_ipv4_addrs match;
8396 
8397 		flow_rule_match_ipv4_addrs(rule, &match);
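		/* only exact-match (/32) IPv4 addresses are supported;
		 * any narrower mask is rejected below
		 */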
8398 		if (match.mask->dst) {
8399 			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8400 				field_flags |= I40E_CLOUD_FIELD_IIP;
8401 			} else {
8402 				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8403 					&match.mask->dst);
8404 				return I40E_ERR_CONFIG;
8405 			}
8406 		}
8407 
8408 		if (match.mask->src) {
8409 			if (match.mask->src == cpu_to_be32(0xffffffff)) {
8410 				field_flags |= I40E_CLOUD_FIELD_IIP;
8411 			} else {
8412 				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8413 					&match.mask->src);
8414 				return I40E_ERR_CONFIG;
8415 			}
8416 		}
8417 
8418 		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8419 			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8420 			return I40E_ERR_CONFIG;
8421 		}
8422 		filter->dst_ipv4 = match.key->dst;
8423 		filter->src_ipv4 = match.key->src;
8424 	}
8425 
8426 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8427 		struct flow_match_ipv6_addrs match;
8428 
8429 		flow_rule_match_ipv6_addrs(rule, &match);
8430 
8431 		/* src and dest IPV6 address should not be LOOPBACK
8432 		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
8433 		 */
8434 		if (ipv6_addr_loopback(&match.key->dst) ||
8435 		    ipv6_addr_loopback(&match.key->src)) {
8436 			dev_err(&pf->pdev->dev,
8437 				"Bad ipv6, addr is LOOPBACK\n");
8438 			return I40E_ERR_CONFIG;
8439 		}
8440 		if (!ipv6_addr_any(&match.mask->dst) ||
8441 		    !ipv6_addr_any(&match.mask->src))
8442 			field_flags |= I40E_CLOUD_FIELD_IIP;
8443 
8444 		memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8445 		       sizeof(filter->src_ipv6));
8446 		memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8447 		       sizeof(filter->dst_ipv6));
8448 	}
8449 
8450 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8451 		struct flow_match_ports match;
8452 
8453 		flow_rule_match_ports(rule, &match);
8454 		if (match.mask->src) {
8455 			if (match.mask->src == cpu_to_be16(0xffff)) {
8456 				field_flags |= I40E_CLOUD_FIELD_IIP;
8457 			} else {
8458 				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8459 					be16_to_cpu(match.mask->src));
8460 				return I40E_ERR_CONFIG;
8461 			}
8462 		}
8463 
8464 		if (match.mask->dst) {
8465 			if (match.mask->dst == cpu_to_be16(0xffff)) {
8466 				field_flags |= I40E_CLOUD_FIELD_IIP;
8467 			} else {
8468 				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8469 					be16_to_cpu(match.mask->dst));
8470 				return I40E_ERR_CONFIG;
8471 			}
8472 		}
8473 
8474 		filter->dst_port = match.key->dst;
8475 		filter->src_port = match.key->src;
8476 
8477 		switch (filter->ip_proto) {
8478 		case IPPROTO_TCP:
8479 		case IPPROTO_UDP:
8480 			break;
8481 		default:
8482 			dev_err(&pf->pdev->dev,
8483 				"Only UDP and TCP transport are supported\n");
8484 			return -EINVAL;
8485 		}
8486 	}
8487 	filter->flags = field_flags;
8488 	return 0;
8489 }
8490 
8491 /**
 * i40e_handle_tclass - Forward to a traffic class on the device
8493  * @vsi: Pointer to VSI
8494  * @tc: traffic class index on the device
8495  * @filter: Pointer to cloud filter structure
8496  *
8497  **/
8498 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8499 			      struct i40e_cloud_filter *filter)
8500 {
8501 	struct i40e_channel *ch, *ch_tmp;
8502 
8503 	/* direct to a traffic class on the same device */
8504 	if (tc == 0) {
8505 		filter->seid = vsi->seid;
8506 		return 0;
8507 	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8508 		if (!filter->dst_port) {
8509 			dev_err(&vsi->back->pdev->dev,
8510 				"Specify destination port to direct to traffic class that is not default\n");
8511 			return -EINVAL;
8512 		}
8513 		if (list_empty(&vsi->ch_list))
8514 			return -EINVAL;
8515 		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8516 					 list) {
8517 			if (ch->seid == vsi->tc_seid_map[tc])
8518 				filter->seid = ch->seid;
8519 		}
8520 		return 0;
8521 	}
8522 	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8523 	return -EINVAL;
8524 }
8525 
8526 /**
8527  * i40e_configure_clsflower - Configure tc flower filters
8528  * @vsi: Pointer to VSI
8529  * @cls_flower: Pointer to struct flow_cls_offload
8530  *
8531  **/
8532 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8533 				    struct flow_cls_offload *cls_flower)
8534 {
8535 	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8536 	struct i40e_cloud_filter *filter = NULL;
8537 	struct i40e_pf *pf = vsi->back;
8538 	int err = 0;
8539 
8540 	if (tc < 0) {
8541 		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8542 		return -EOPNOTSUPP;
8543 	}
8544 
8545 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8546 	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8547 		return -EBUSY;
8548 
8549 	if (pf->fdir_pf_active_filters ||
8550 	    (!hlist_empty(&pf->fdir_filter_list))) {
8551 		dev_err(&vsi->back->pdev->dev,
8552 			"Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8553 		return -EINVAL;
8554 	}
8555 
8556 	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8557 		dev_err(&vsi->back->pdev->dev,
8558 			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8559 		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8560 		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8561 	}
8562 
8563 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8564 	if (!filter)
8565 		return -ENOMEM;
8566 
8567 	filter->cookie = cls_flower->cookie;
8568 
8569 	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8570 	if (err < 0)
8571 		goto err;
8572 
8573 	err = i40e_handle_tclass(vsi, tc, filter);
8574 	if (err < 0)
8575 		goto err;
8576 
8577 	/* Add cloud filter */
8578 	if (filter->dst_port)
8579 		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8580 	else
8581 		err = i40e_add_del_cloud_filter(vsi, filter, true);
8582 
8583 	if (err) {
8584 		dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
8585 			err);
8586 		goto err;
8587 	}
8588 
8589 	/* add filter to the ordered list */
8590 	INIT_HLIST_NODE(&filter->cloud_node);
8591 
8592 	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8593 
8594 	pf->num_cloud_filters++;
8595 
8596 	return err;
8597 err:
8598 	kfree(filter);
8599 	return err;
8600 }
8601 
8602 /**
 * i40e_find_cloud_filter - Find the cloud filter in the list
8604  * @vsi: Pointer to VSI
8605  * @cookie: filter specific cookie
8606  *
8607  **/
8608 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8609 							unsigned long *cookie)
8610 {
8611 	struct i40e_cloud_filter *filter = NULL;
8612 	struct hlist_node *node2;
8613 
8614 	hlist_for_each_entry_safe(filter, node2,
8615 				  &vsi->back->cloud_filter_list, cloud_node)
8616 		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8617 			return filter;
8618 	return NULL;
8619 }
8620 
8621 /**
8622  * i40e_delete_clsflower - Remove tc flower filters
8623  * @vsi: Pointer to VSI
8624  * @cls_flower: Pointer to struct flow_cls_offload
8625  *
8626  **/
8627 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8628 				 struct flow_cls_offload *cls_flower)
8629 {
8630 	struct i40e_cloud_filter *filter = NULL;
8631 	struct i40e_pf *pf = vsi->back;
8632 	int err = 0;
8633 
8634 	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8635 
8636 	if (!filter)
8637 		return -EINVAL;
8638 
8639 	hash_del(&filter->cloud_node);
8640 
8641 	if (filter->dst_port)
8642 		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8643 	else
8644 		err = i40e_add_del_cloud_filter(vsi, filter, false);
8645 
8646 	kfree(filter);
8647 	if (err) {
8648 		dev_err(&pf->pdev->dev,
8649 			"Failed to delete cloud filter, err %s\n",
8650 			i40e_stat_str(&pf->hw, err));
8651 		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8652 	}
8653 
8654 	pf->num_cloud_filters--;
8655 	if (!pf->num_cloud_filters)
8656 		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8657 		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8658 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8659 			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8660 			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8661 		}
8662 	return 0;
8663 }
8664 
8665 /**
8666  * i40e_setup_tc_cls_flower - flower classifier offloads
8667  * @np: net device to configure
8668  * @cls_flower: offload data
8669  **/
8670 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8671 				    struct flow_cls_offload *cls_flower)
8672 {
8673 	struct i40e_vsi *vsi = np->vsi;
8674 
8675 	switch (cls_flower->command) {
8676 	case FLOW_CLS_REPLACE:
8677 		return i40e_configure_clsflower(vsi, cls_flower);
8678 	case FLOW_CLS_DESTROY:
8679 		return i40e_delete_clsflower(vsi, cls_flower);
8680 	case FLOW_CLS_STATS:
8681 		return -EOPNOTSUPP;
8682 	default:
8683 		return -EOPNOTSUPP;
8684 	}
8685 }
8686 
8687 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8688 				  void *cb_priv)
8689 {
8690 	struct i40e_netdev_priv *np = cb_priv;
8691 
8692 	if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8693 		return -EOPNOTSUPP;
8694 
8695 	switch (type) {
8696 	case TC_SETUP_CLSFLOWER:
8697 		return i40e_setup_tc_cls_flower(np, type_data);
8698 
8699 	default:
8700 		return -EOPNOTSUPP;
8701 	}
8702 }
8703 
8704 static LIST_HEAD(i40e_block_cb_list);
8705 
8706 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8707 			   void *type_data)
8708 {
8709 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8710 
8711 	switch (type) {
8712 	case TC_SETUP_QDISC_MQPRIO:
8713 		return i40e_setup_tc(netdev, type_data);
8714 	case TC_SETUP_BLOCK:
8715 		return flow_block_cb_setup_simple(type_data,
8716 						  &i40e_block_cb_list,
8717 						  i40e_setup_tc_block_cb,
8718 						  np, np, true);
8719 	default:
8720 		return -EOPNOTSUPP;
8721 	}
8722 }
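
/* Usage sketch (illustrative commands; the interface name is hypothetical):
 * the mqprio and flower offloads dispatched above are normally exercised
 * from userspace along these lines:
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *           queues 4@0 4@4 hw 1 mode channel
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 protocol ip ingress flower ip_proto tcp \
 *           dst_port 80 skip_sw hw_tc 1
 * The mqprio command reaches i40e_setup_tc() via TC_SETUP_QDISC_MQPRIO,
 * while the flower rule arrives through the block callback as
 * FLOW_CLS_REPLACE and ends up in i40e_configure_clsflower().
 */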
8723 
8724 /**
8725  * i40e_open - Called when a network interface is made active
8726  * @netdev: network interface device structure
8727  *
8728  * The open entry point is called when a network interface is made
8729  * active by the system (IFF_UP).  At this point all resources needed
8730  * for transmit and receive operations are allocated, the interrupt
8731  * handler is registered with the OS, the netdev watchdog subtask is
8732  * enabled, and the stack is notified that the interface is ready.
8733  *
8734  * Returns 0 on success, negative value on failure
8735  **/
8736 int i40e_open(struct net_device *netdev)
8737 {
8738 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8739 	struct i40e_vsi *vsi = np->vsi;
8740 	struct i40e_pf *pf = vsi->back;
8741 	int err;
8742 
8743 	/* disallow open during test or if eeprom is broken */
8744 	if (test_bit(__I40E_TESTING, pf->state) ||
8745 	    test_bit(__I40E_BAD_EEPROM, pf->state))
8746 		return -EBUSY;
8747 
8748 	netif_carrier_off(netdev);
8749 
8750 	if (i40e_force_link_state(pf, true))
8751 		return -EAGAIN;
8752 
8753 	err = i40e_vsi_open(vsi);
8754 	if (err)
8755 		return err;
8756 
8757 	/* configure global TSO hardware offload settings */
8758 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8759 						       TCP_FLAG_FIN) >> 16);
8760 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8761 						       TCP_FLAG_FIN |
8762 						       TCP_FLAG_CWR) >> 16);
8763 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
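
	/* Worked example of the masks written above (a sketch based on the
	 * TCP_FLAG_* definitions in the networking headers, where the flag
	 * bits sit in bits 16-23 of the big-endian word):
	 * TCP_FLAG_PSH | TCP_FLAG_FIN is htonl(0x00090000), so
	 * be32_to_cpu(...) >> 16 yields 0x09 for TSOMSK_F; PSH | FIN | CWR
	 * yields 0x89 for TSOMSK_M and CWR alone yields 0x80 for TSOMSK_L,
	 * i.e. the same bit positions the flags occupy in the TCP header's
	 * flags octet.
	 */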
8764 	udp_tunnel_get_rx_info(netdev);
8765 
8766 	return 0;
8767 }
8768 
8769 /**
8770  * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
8771  * @vsi: vsi structure
8772  *
8773  * This updates netdev's number of tx/rx queues
8774  *
8775  * Returns status of setting tx/rx queues
8776  **/
8777 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
8778 {
8779 	int ret;
8780 
8781 	ret = netif_set_real_num_rx_queues(vsi->netdev,
8782 					   vsi->num_queue_pairs);
8783 	if (ret)
8784 		return ret;
8785 
8786 	return netif_set_real_num_tx_queues(vsi->netdev,
8787 					    vsi->num_queue_pairs);
8788 }
8789 
8790 /**
8791  * i40e_vsi_open - Set up resources and make a VSI operational
8792  * @vsi: the VSI to open
8793  *
8794  * Finish initialization of the VSI.
8795  *
8796  * Returns 0 on success, negative value on failure
8797  *
8798  * Note: expects to be called while under rtnl_lock()
8799  **/
8800 int i40e_vsi_open(struct i40e_vsi *vsi)
8801 {
8802 	struct i40e_pf *pf = vsi->back;
8803 	char int_name[I40E_INT_NAME_STR_LEN];
8804 	int err;
8805 
8806 	/* allocate descriptors */
8807 	err = i40e_vsi_setup_tx_resources(vsi);
8808 	if (err)
8809 		goto err_setup_tx;
8810 	err = i40e_vsi_setup_rx_resources(vsi);
8811 	if (err)
8812 		goto err_setup_rx;
8813 
8814 	err = i40e_vsi_configure(vsi);
8815 	if (err)
8816 		goto err_setup_rx;
8817 
8818 	if (vsi->netdev) {
8819 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8820 			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8821 		err = i40e_vsi_request_irq(vsi, int_name);
8822 		if (err)
8823 			goto err_setup_rx;
8824 
8825 		/* Notify the stack of the actual queue counts. */
8826 		err = i40e_netif_set_realnum_tx_rx_queues(vsi);
8827 		if (err)
8828 			goto err_set_queues;
8829 
8830 	} else if (vsi->type == I40E_VSI_FDIR) {
8831 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8832 			 dev_driver_string(&pf->pdev->dev),
8833 			 dev_name(&pf->pdev->dev));
8834 		err = i40e_vsi_request_irq(vsi, int_name);
8835 		if (err)
8836 			goto err_setup_rx;
8837 
8838 	} else {
8839 		err = -EINVAL;
8840 		goto err_setup_rx;
8841 	}
8842 
8843 	err = i40e_up_complete(vsi);
8844 	if (err)
8845 		goto err_up_complete;
8846 
8847 	return 0;
8848 
8849 err_up_complete:
8850 	i40e_down(vsi);
8851 err_set_queues:
8852 	i40e_vsi_free_irq(vsi);
8853 err_setup_rx:
8854 	i40e_vsi_free_rx_resources(vsi);
8855 err_setup_tx:
8856 	i40e_vsi_free_tx_resources(vsi);
8857 	if (vsi == pf->vsi[pf->lan_vsi])
8858 		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8859 
8860 	return err;
8861 }
8862 
8863 /**
8864  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8865  * @pf: Pointer to PF
8866  *
8867  * This function destroys the hlist where all the Flow Director
8868  * filters were saved.
8869  **/
8870 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8871 {
8872 	struct i40e_fdir_filter *filter;
8873 	struct i40e_flex_pit *pit_entry, *tmp;
8874 	struct hlist_node *node2;
8875 
8876 	hlist_for_each_entry_safe(filter, node2,
8877 				  &pf->fdir_filter_list, fdir_node) {
8878 		hlist_del(&filter->fdir_node);
8879 		kfree(filter);
8880 	}
8881 
8882 	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8883 		list_del(&pit_entry->list);
8884 		kfree(pit_entry);
8885 	}
8886 	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8887 
8888 	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8889 		list_del(&pit_entry->list);
8890 		kfree(pit_entry);
8891 	}
8892 	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8893 
8894 	pf->fdir_pf_active_filters = 0;
8895 	i40e_reset_fdir_filter_cnt(pf);
8896 
8897 	/* Reprogram the default input set for TCP/IPv4 */
8898 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8899 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8900 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8901 
8902 	/* Reprogram the default input set for TCP/IPv6 */
8903 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8904 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8905 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8906 
8907 	/* Reprogram the default input set for UDP/IPv4 */
8908 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8909 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8910 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8911 
8912 	/* Reprogram the default input set for UDP/IPv6 */
8913 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8914 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8915 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8916 
8917 	/* Reprogram the default input set for SCTP/IPv4 */
8918 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8919 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8920 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8921 
8922 	/* Reprogram the default input set for SCTP/IPv6 */
8923 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8924 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8925 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8926 
8927 	/* Reprogram the default input set for Other/IPv4 */
8928 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8929 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8930 
8931 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8932 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8933 
8934 	/* Reprogram the default input set for Other/IPv6 */
8935 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8936 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8937 
8938 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
8939 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8940 }
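
/* The "input set" reprogrammed above is what subsequent ethtool ntuple
 * rules are matched against; a sketch of how such a rule is installed
 * (interface name and values are illustrative):
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 action 2
 * Rules added this way repopulate pf->fdir_filter_list, the hlist this
 * function has just emptied.
 */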
8941 
8942 /**
8943  * i40e_cloud_filter_exit - Cleans up the cloud filters
8944  * @pf: Pointer to PF
8945  *
8946  * This function destroys the hlist where all the cloud filters
8947  * were saved.
8948  **/
8949 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8950 {
8951 	struct i40e_cloud_filter *cfilter;
8952 	struct hlist_node *node;
8953 
8954 	hlist_for_each_entry_safe(cfilter, node,
8955 				  &pf->cloud_filter_list, cloud_node) {
8956 		hlist_del(&cfilter->cloud_node);
8957 		kfree(cfilter);
8958 	}
8959 	pf->num_cloud_filters = 0;
8960 
8961 	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8962 	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8963 		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8964 		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8965 		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8966 	}
8967 }
8968 
8969 /**
8970  * i40e_close - Disables a network interface
8971  * @netdev: network interface device structure
8972  *
8973  * The close entry point is called when an interface is de-activated
8974  * by the OS.  The hardware is still under the driver's control, but
8975  * this netdev interface is disabled.
8976  *
8977  * Returns 0, this is not allowed to fail
8978  **/
8979 int i40e_close(struct net_device *netdev)
8980 {
8981 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8982 	struct i40e_vsi *vsi = np->vsi;
8983 
8984 	i40e_vsi_close(vsi);
8985 
8986 	return 0;
8987 }
8988 
8989 /**
8990  * i40e_do_reset - Start a PF or Core Reset sequence
8991  * @pf: board private structure
8992  * @reset_flags: which reset is requested
8993  * @lock_acquired: indicates whether or not the lock has been acquired
8994  * before this function was called.
8995  *
8996  * The essential difference in resets is that the PF Reset
8997  * doesn't clear the packet buffers, doesn't reset the PE
8998  * firmware, and doesn't bother the other PFs on the chip.
8999  **/
9000 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9001 {
9002 	u32 val;
9003 
9004 	/* do the biggest reset indicated */
9005 	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9006 
9007 		/* Request a Global Reset
9008 		 *
9009 		 * This will start the chip's countdown to the actual full
9010 		 * chip reset event, and a warning interrupt to be sent
9011 		 * to all PFs, including the requestor.  Our handler
9012 		 * for the warning interrupt will deal with the shutdown
9013 		 * and recovery of the switch setup.
9014 		 */
9015 		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9016 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9017 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9018 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9019 
9020 	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9021 
9022 		/* Request a Core Reset
9023 		 *
9024 		 * Same as Global Reset, except does *not* include the MAC/PHY
9025 		 */
9026 		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9027 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9028 		val |= I40E_GLGEN_RTRIG_CORER_MASK;
9029 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9030 		i40e_flush(&pf->hw);
9031 
9032 	} else if (reset_flags & I40E_PF_RESET_FLAG) {
9033 
9034 		/* Request a PF Reset
9035 		 *
9036 		 * Resets only the PF-specific registers
9037 		 *
9038 		 * This goes directly to the tear-down and rebuild of
9039 		 * the switch, since we need to do all the recovery as
9040 		 * for the Core Reset.
9041 		 */
9042 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
9043 		i40e_handle_reset_warning(pf, lock_acquired);
9044 
9045 	} else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9046 		/* Request a PF Reset
9047 		 *
9048 		 * Resets the PF and reinitializes the PF's VSIs.
9049 		 */
9050 		i40e_prep_for_reset(pf);
9051 		i40e_reset_and_rebuild(pf, true, lock_acquired);
9052 		dev_info(&pf->pdev->dev,
9053 			 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
9054 			 "FW LLDP is disabled\n" :
9055 			 "FW LLDP is enabled\n");
9056 
9057 	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9058 		int v;
9059 
9060 		/* Find the VSI(s) that requested a re-init */
9061 		dev_info(&pf->pdev->dev, "VSI reinit requested\n");
9063 		for (v = 0; v < pf->num_alloc_vsi; v++) {
9064 			struct i40e_vsi *vsi = pf->vsi[v];
9065 
9066 			if (vsi != NULL &&
9067 			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9068 					       vsi->state))
9069 				i40e_vsi_reinit_locked(pf->vsi[v]);
9070 		}
9071 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9072 		int v;
9073 
9074 		/* Find the VSI(s) that need to be brought down */
9075 		dev_info(&pf->pdev->dev, "VSI down requested\n");
9076 		for (v = 0; v < pf->num_alloc_vsi; v++) {
9077 			struct i40e_vsi *vsi = pf->vsi[v];
9078 
9079 			if (vsi != NULL &&
9080 			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9081 					       vsi->state)) {
9082 				set_bit(__I40E_VSI_DOWN, vsi->state);
9083 				i40e_down(vsi);
9084 			}
9085 		}
9086 	} else {
9087 		dev_info(&pf->pdev->dev,
9088 			 "bad reset request 0x%08x\n", reset_flags);
9089 	}
9090 }
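
/* These reset paths can also be exercised from userspace; for instance,
 * when the driver is built with CONFIG_DEBUG_FS, writing "pfr", "corer"
 * or "globr" to the i40e debugfs "command" file is expected to request
 * the corresponding reset via i40e_do_reset_safe() (illustrative only;
 * the exact command strings are handled in i40e_debugfs.c).
 */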
9091 
9092 #ifdef CONFIG_I40E_DCB
9093 /**
9094  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9095  * @pf: board private structure
9096  * @old_cfg: current DCB config
9097  * @new_cfg: new DCB config
9098  **/
9099 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9100 			    struct i40e_dcbx_config *old_cfg,
9101 			    struct i40e_dcbx_config *new_cfg)
9102 {
9103 	bool need_reconfig = false;
9104 
9105 	/* Check if ETS configuration has changed */
9106 	if (memcmp(&new_cfg->etscfg,
9107 		   &old_cfg->etscfg,
9108 		   sizeof(new_cfg->etscfg))) {
9109 		/* If Priority Table has changed reconfig is needed */
9110 		if (memcmp(&new_cfg->etscfg.prioritytable,
9111 			   &old_cfg->etscfg.prioritytable,
9112 			   sizeof(new_cfg->etscfg.prioritytable))) {
9113 			need_reconfig = true;
9114 			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9115 		}
9116 
9117 		if (memcmp(&new_cfg->etscfg.tcbwtable,
9118 			   &old_cfg->etscfg.tcbwtable,
9119 			   sizeof(new_cfg->etscfg.tcbwtable)))
9120 			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9121 
9122 		if (memcmp(&new_cfg->etscfg.tsatable,
9123 			   &old_cfg->etscfg.tsatable,
9124 			   sizeof(new_cfg->etscfg.tsatable)))
9125 			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9126 	}
9127 
9128 	/* Check if PFC configuration has changed */
9129 	if (memcmp(&new_cfg->pfc,
9130 		   &old_cfg->pfc,
9131 		   sizeof(new_cfg->pfc))) {
9132 		need_reconfig = true;
9133 		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9134 	}
9135 
9136 	/* Check if APP Table has changed */
9137 	if (memcmp(&new_cfg->app,
9138 		   &old_cfg->app,
9139 		   sizeof(new_cfg->app))) {
9140 		need_reconfig = true;
9141 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9142 	}
9143 
9144 	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9145 	return need_reconfig;
9146 }
9147 
9148 /**
9149  * i40e_handle_lldp_event - Handle LLDP Change MIB event
9150  * @pf: board private structure
9151  * @e: event info posted on ARQ
9152  **/
9153 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9154 				  struct i40e_arq_event_info *e)
9155 {
9156 	struct i40e_aqc_lldp_get_mib *mib =
9157 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9158 	struct i40e_hw *hw = &pf->hw;
9159 	struct i40e_dcbx_config tmp_dcbx_cfg;
9160 	bool need_reconfig = false;
9161 	int ret = 0;
9162 	u8 type;
9163 
9164 	/* X710-T*L 2.5G and 5G speeds don't support DCB */
9165 	if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9166 	    (hw->phy.link_info.link_speed &
9167 	     ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9168 	     !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9169 		/* let firmware decide if the DCB should be disabled */
9170 		pf->flags |= I40E_FLAG_DCB_CAPABLE;
9171 
9172 	/* Not DCB capable or capability disabled */
9173 	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9174 		return ret;
9175 
9176 	/* Ignore if event is not for Nearest Bridge */
9177 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9178 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9179 	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9180 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9181 		return ret;
9182 
9183 	/* Check MIB Type and return if event for Remote MIB update */
9184 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9185 	dev_dbg(&pf->pdev->dev,
9186 		"LLDP event mib type %s\n", type ? "remote" : "local");
9187 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9188 		/* Update the remote cached instance and return */
9189 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9190 				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9191 				&hw->remote_dcbx_config);
9192 		goto exit;
9193 	}
9194 
9195 	/* Store the old configuration */
9196 	tmp_dcbx_cfg = hw->local_dcbx_config;
9197 
9198 	/* Reset the old DCBx configuration data */
9199 	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9200 	/* Get updated DCBX data from firmware */
9201 	ret = i40e_get_dcb_config(&pf->hw);
9202 	if (ret) {
9203 		/* X710-T*L 2.5G and 5G speeds don't support DCB */
9204 		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9205 		    (hw->phy.link_info.link_speed &
9206 		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9207 			dev_warn(&pf->pdev->dev,
9208 				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9209 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9210 		} else {
9211 			dev_info(&pf->pdev->dev,
9212 				 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
9213 				 i40e_stat_str(&pf->hw, ret),
9214 				 i40e_aq_str(&pf->hw,
9215 					     pf->hw.aq.asq_last_status));
9216 		}
9217 		goto exit;
9218 	}
9219 
9220 	/* No change detected in DCBX configs */
9221 	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9222 		    sizeof(tmp_dcbx_cfg))) {
9223 		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9224 		goto exit;
9225 	}
9226 
9227 	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9228 					       &hw->local_dcbx_config);
9229 
9230 	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9231 
9232 	if (!need_reconfig)
9233 		goto exit;
9234 
9235 	/* Enable DCB tagging only when more than one TC */
9236 	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9237 		pf->flags |= I40E_FLAG_DCB_ENABLED;
9238 	else
9239 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9240 
9241 	set_bit(__I40E_PORT_SUSPENDED, pf->state);
9242 	/* Reconfiguration needed, so quiesce all VSIs */
9243 	i40e_pf_quiesce_all_vsi(pf);
9244 
9245 	/* Changes in configuration update VEB/VSI */
9246 	i40e_dcb_reconfigure(pf);
9247 
9248 	ret = i40e_resume_port_tx(pf);
9249 
9250 	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9251 	/* In case of error no point in resuming VSIs */
9252 	if (ret)
9253 		goto exit;
9254 
9255 	/* Wait for the PF's queues to be disabled */
9256 	ret = i40e_pf_wait_queues_disabled(pf);
9257 	if (ret) {
9258 		/* Schedule PF reset to recover */
9259 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9260 		i40e_service_event_schedule(pf);
9261 	} else {
9262 		i40e_pf_unquiesce_all_vsi(pf);
9263 		set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9264 		set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9265 	}
9266 
9267 exit:
9268 	return ret;
9269 }
9270 #endif /* CONFIG_I40E_DCB */
9271 
9272 /**
9273  * i40e_do_reset_safe - Protected reset path for userland calls.
9274  * @pf: board private structure
9275  * @reset_flags: which reset is requested
9276  *
9277  **/
9278 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9279 {
9280 	rtnl_lock();
9281 	i40e_do_reset(pf, reset_flags, true);
9282 	rtnl_unlock();
9283 }
9284 
9285 /**
9286  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9287  * @pf: board private structure
9288  * @e: event info posted on ARQ
9289  *
9290  * Handler for LAN Queue Overflow Event generated by the firmware for PF
9291  * and VF queues
9292  **/
9293 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9294 					   struct i40e_arq_event_info *e)
9295 {
9296 	struct i40e_aqc_lan_overflow *data =
9297 		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9298 	u32 queue = le32_to_cpu(data->prtdcb_rupto);
9299 	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9300 	struct i40e_hw *hw = &pf->hw;
9301 	struct i40e_vf *vf;
9302 	u16 vf_id;
9303 
9304 	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9305 		queue, qtx_ctl);
9306 
9307 	/* Queue belongs to VF, find the VF and issue VF reset */
9308 	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9309 	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9310 		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9311 			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9312 		vf_id -= hw->func_caps.vf_base_id;
9313 		vf = &pf->vf[vf_id];
9314 		i40e_vc_notify_vf_reset(vf);
9315 		/* Allow VF to process pending reset notification */
9316 		msleep(20);
9317 		i40e_reset_vf(vf, false);
9318 	}
9319 }
9320 
9321 /**
9322  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9323  * @pf: board private structure
9324  **/
9325 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9326 {
9327 	u32 val, fcnt_prog;
9328 
9329 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9330 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9331 	return fcnt_prog;
9332 }
9333 
9334 /**
9335  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9336  * @pf: board private structure
9337  **/
9338 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9339 {
9340 	u32 val, fcnt_prog;
9341 
9342 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9343 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9344 		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9345 		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9346 	return fcnt_prog;
9347 }
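
/* Worked example (the register value is hypothetical): if I40E_PFQF_FDSTAT
 * reads 0x00230010, the guaranteed-count field is 0x10 (16 filters) and the
 * best-effort field, taken from the bits above BEST_CNT_SHIFT, is 0x23
 * (35 filters), so i40e_get_current_fd_count() reports 51 programmed filters.
 */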
9348 
9349 /**
9350  * i40e_get_global_fd_count - Get total FD filters programmed on device
9351  * @pf: board private structure
9352  **/
9353 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9354 {
9355 	u32 val, fcnt_prog;
9356 
9357 	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9358 	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9359 		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9360 		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
9361 	return fcnt_prog;
9362 }
9363 
9364 /**
9365  * i40e_reenable_fdir_sb - Restore FDir SB capability
9366  * @pf: board private structure
9367  **/
9368 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9369 {
9370 	if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9371 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9372 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
9373 			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9374 }
9375 
9376 /**
9377  * i40e_reenable_fdir_atr - Restore FDir ATR capability
9378  * @pf: board private structure
9379  **/
9380 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9381 {
9382 	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9383 		/* ATR uses the same filtering logic as SB rules. It only
9384 		 * functions properly if the input set mask is at the default
9385 		 * settings. It is safe to restore the default input set
9386 		 * because there are no active TCPv4 filter rules.
9387 		 */
9388 		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9389 					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9390 					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9391 
9392 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9393 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
9394 			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9395 	}
9396 }
9397 
9398 /**
9399  * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9400  * @pf: board private structure
9401  * @filter: FDir filter to remove
9402  */
9403 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9404 				       struct i40e_fdir_filter *filter)
9405 {
9406 	/* Update counters */
9407 	pf->fdir_pf_active_filters--;
9408 	pf->fd_inv = 0;
9409 
9410 	switch (filter->flow_type) {
9411 	case TCP_V4_FLOW:
9412 		pf->fd_tcp4_filter_cnt--;
9413 		break;
9414 	case UDP_V4_FLOW:
9415 		pf->fd_udp4_filter_cnt--;
9416 		break;
9417 	case SCTP_V4_FLOW:
9418 		pf->fd_sctp4_filter_cnt--;
9419 		break;
9420 	case TCP_V6_FLOW:
9421 		pf->fd_tcp6_filter_cnt--;
9422 		break;
9423 	case UDP_V6_FLOW:
9424 		pf->fd_udp6_filter_cnt--;
9425 		break;
9426 	case SCTP_V6_FLOW:
9427 		pf->fd_sctp6_filter_cnt--;
9428 		break;
9429 	case IP_USER_FLOW:
9430 		switch (filter->ipl4_proto) {
9431 		case IPPROTO_TCP:
9432 			pf->fd_tcp4_filter_cnt--;
9433 			break;
9434 		case IPPROTO_UDP:
9435 			pf->fd_udp4_filter_cnt--;
9436 			break;
9437 		case IPPROTO_SCTP:
9438 			pf->fd_sctp4_filter_cnt--;
9439 			break;
9440 		case IPPROTO_IP:
9441 			pf->fd_ip4_filter_cnt--;
9442 			break;
9443 		}
9444 		break;
9445 	case IPV6_USER_FLOW:
9446 		switch (filter->ipl4_proto) {
9447 		case IPPROTO_TCP:
9448 			pf->fd_tcp6_filter_cnt--;
9449 			break;
9450 		case IPPROTO_UDP:
9451 			pf->fd_udp6_filter_cnt--;
9452 			break;
9453 		case IPPROTO_SCTP:
9454 			pf->fd_sctp6_filter_cnt--;
9455 			break;
9456 		case IPPROTO_IP:
9457 			pf->fd_ip6_filter_cnt--;
9458 			break;
9459 		}
9460 		break;
9461 	}
9462 
9463 	/* Remove the filter from the list and free memory */
9464 	hlist_del(&filter->fdir_node);
9465 	kfree(filter);
9466 }
9467 
9468 /**
9469  * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9470  * @pf: board private structure
9471  **/
9472 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9473 {
9474 	struct i40e_fdir_filter *filter;
9475 	u32 fcnt_prog, fcnt_avail;
9476 	struct hlist_node *node;
9477 
9478 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9479 		return;
9480 
9481 	/* Check if we have enough room to re-enable FDir SB capability. */
9482 	fcnt_prog = i40e_get_global_fd_count(pf);
9483 	fcnt_avail = pf->fdir_pf_filter_count;
9484 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9485 	    (pf->fd_add_err == 0) ||
9486 	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9487 		i40e_reenable_fdir_sb(pf);
9488 
9489 	/* We should wait for even more space before re-enabling ATR.
9490 	 * Additionally, we cannot enable ATR as long as we still have TCP SB
9491 	 * rules active.
9492 	 */
9493 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9494 	    pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9495 		i40e_reenable_fdir_atr(pf);
9496 
9497 	/* if hw had a problem adding a filter, delete it */
9498 	if (pf->fd_inv > 0) {
9499 		hlist_for_each_entry_safe(filter, node,
9500 					  &pf->fdir_filter_list, fdir_node)
9501 			if (filter->fd_id == pf->fd_inv)
9502 				i40e_delete_invalid_filter(pf, filter);
9503 	}
9504 }
9505 
9506 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9507 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9508 /**
9509  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9510  * @pf: board private structure
9511  **/
9512 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9513 {
9514 	unsigned long min_flush_time;
9515 	int flush_wait_retry = 50;
9516 	bool disable_atr = false;
9517 	int fd_room;
9518 	int reg;
9519 
9520 	if (!time_after(jiffies, pf->fd_flush_timestamp +
9521 				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9522 		return;
9523 
9524 	/* If the flush is happening too quickly and we have mostly SB rules,
9525 	 * we should not re-enable ATR for some time.
9526 	 */
9527 	min_flush_time = pf->fd_flush_timestamp +
9528 			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9529 	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9530 
9531 	if (!(time_after(jiffies, min_flush_time)) &&
9532 	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9533 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
9534 			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9535 		disable_atr = true;
9536 	}
9537 
9538 	pf->fd_flush_timestamp = jiffies;
9539 	set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9540 	/* flush all filters */
9541 	wr32(&pf->hw, I40E_PFQF_CTL_1,
9542 	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9543 	i40e_flush(&pf->hw);
9544 	pf->fd_flush_cnt++;
9545 	pf->fd_add_err = 0;
9546 	do {
9547 		/* Check FD flush status every 5-6msec */
9548 		usleep_range(5000, 6000);
9549 		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9550 		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9551 			break;
9552 	} while (flush_wait_retry--);
9553 	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9554 		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9555 	} else {
9556 		/* replay sideband filters */
9557 		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9558 		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9559 			clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9560 		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9561 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
9562 			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9563 	}
9564 }
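
/* Timing sketch for the flush logic above: flushes are rate limited to one
 * every I40E_MIN_FD_FLUSH_INTERVAL (10) seconds, and if the previous flush
 * happened within the last I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE (30) seconds
 * while fewer than I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR entries remain free,
 * ATR is left disabled after the sideband rules are replayed.
 */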
9565 
9566 /**
9567  * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9568  * @pf: board private structure
9569  **/
9570 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9571 {
9572 	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9573 }
9574 
9575 /**
9576  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9577  * @pf: board private structure
9578  **/
9579 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9580 {
9581 
9582 	/* if interface is down do nothing */
9583 	if (test_bit(__I40E_DOWN, pf->state))
9584 		return;
9585 
9586 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9587 		i40e_fdir_flush_and_replay(pf);
9588 
9589 	i40e_fdir_check_and_reenable(pf);
9590 
9591 }
9592 
9593 /**
9594  * i40e_vsi_link_event - notify VSI of a link event
9595  * @vsi: vsi to be notified
9596  * @link_up: link up or down
9597  **/
9598 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9599 {
9600 	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9601 		return;
9602 
9603 	switch (vsi->type) {
9604 	case I40E_VSI_MAIN:
9605 		if (!vsi->netdev || !vsi->netdev_registered)
9606 			break;
9607 
9608 		if (link_up) {
9609 			netif_carrier_on(vsi->netdev);
9610 			netif_tx_wake_all_queues(vsi->netdev);
9611 		} else {
9612 			netif_carrier_off(vsi->netdev);
9613 			netif_tx_stop_all_queues(vsi->netdev);
9614 		}
9615 		break;
9616 
9617 	case I40E_VSI_SRIOV:
9618 	case I40E_VSI_VMDQ2:
9619 	case I40E_VSI_CTRL:
9620 	case I40E_VSI_IWARP:
9621 	case I40E_VSI_MIRROR:
9622 	default:
9623 		/* there is no notification for other VSIs */
9624 		break;
9625 	}
9626 }
9627 
9628 /**
9629  * i40e_veb_link_event - notify elements on the veb of a link event
9630  * @veb: veb to be notified
9631  * @link_up: link up or down
9632  **/
9633 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9634 {
9635 	struct i40e_pf *pf;
9636 	int i;
9637 
9638 	if (!veb || !veb->pf)
9639 		return;
9640 	pf = veb->pf;
9641 
9642 	/* depth first... */
9643 	for (i = 0; i < I40E_MAX_VEB; i++)
9644 		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9645 			i40e_veb_link_event(pf->veb[i], link_up);
9646 
9647 	/* ... now the local VSIs */
9648 	for (i = 0; i < pf->num_alloc_vsi; i++)
9649 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9650 			i40e_vsi_link_event(pf->vsi[i], link_up);
9651 }
9652 
9653 /**
9654  * i40e_link_event - Update netif_carrier status
9655  * @pf: board private structure
9656  **/
9657 static void i40e_link_event(struct i40e_pf *pf)
9658 {
9659 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9660 	u8 new_link_speed, old_link_speed;
9661 	i40e_status status;
9662 	bool new_link, old_link;
9663 #ifdef CONFIG_I40E_DCB
9664 	int err;
9665 #endif /* CONFIG_I40E_DCB */
9666 
9667 	/* set this to force the get_link_status call to refresh state */
9668 	pf->hw.phy.get_link_info = true;
9669 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9670 	status = i40e_get_link_status(&pf->hw, &new_link);
9671 
9672 	/* On success, disable temp link polling */
9673 	if (status == I40E_SUCCESS) {
9674 		clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9675 	} else {
9676 		/* Enable link polling temporarily until i40e_get_link_status
9677 		 * returns I40E_SUCCESS
9678 		 */
9679 		set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9680 		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9681 			status);
9682 		return;
9683 	}
9684 
9685 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
9686 	new_link_speed = pf->hw.phy.link_info.link_speed;
9687 
9688 	if (new_link == old_link &&
9689 	    new_link_speed == old_link_speed &&
9690 	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9691 	     new_link == netif_carrier_ok(vsi->netdev)))
9692 		return;
9693 
9694 	i40e_print_link_message(vsi, new_link);
9695 
9696 	/* Notify the base of the switch tree connected to
9697 	 * the link.  Floating VEBs are not notified.
9698 	 */
9699 	if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9700 		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9701 	else
9702 		i40e_vsi_link_event(vsi, new_link);
9703 
9704 	if (pf->vf)
9705 		i40e_vc_notify_link_state(pf);
9706 
9707 	if (pf->flags & I40E_FLAG_PTP)
9708 		i40e_ptp_set_increment(pf);
9709 #ifdef CONFIG_I40E_DCB
9710 	if (new_link == old_link)
9711 		return;
9712 	/* Not SW DCB so firmware will take care of default settings */
9713 	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9714 		return;
9715 
9716 	/* Only link down is handled here; after link up, in the SW DCB case,
9717 	 * the SW LLDP agent will take care of setting DCB up again.
9718 	 */
9719 	if (!new_link) {
9720 		dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9721 		memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9722 		err = i40e_dcb_sw_default_config(pf);
9723 		if (err) {
9724 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9725 				       I40E_FLAG_DCB_ENABLED);
9726 		} else {
9727 			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9728 				       DCB_CAP_DCBX_VER_IEEE;
9729 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
9730 			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9731 		}
9732 	}
9733 #endif /* CONFIG_I40E_DCB */
9734 }
9735 
9736 /**
9737  * i40e_watchdog_subtask - periodic checks not using event driven response
9738  * @pf: board private structure
9739  **/
9740 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9741 {
9742 	int i;
9743 
9744 	/* if interface is down do nothing */
9745 	if (test_bit(__I40E_DOWN, pf->state) ||
9746 	    test_bit(__I40E_CONFIG_BUSY, pf->state))
9747 		return;
9748 
9749 	/* make sure we don't do these things too often */
9750 	if (time_before(jiffies, (pf->service_timer_previous +
9751 				  pf->service_timer_period)))
9752 		return;
9753 	pf->service_timer_previous = jiffies;
9754 
9755 	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9756 	    test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9757 		i40e_link_event(pf);
9758 
9759 	/* Update the stats for active netdevs so the network stack
9760 	 * can look at updated numbers whenever it cares to
9761 	 */
9762 	for (i = 0; i < pf->num_alloc_vsi; i++)
9763 		if (pf->vsi[i] && pf->vsi[i]->netdev)
9764 			i40e_update_stats(pf->vsi[i]);
9765 
9766 	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9767 		/* Update the stats for the active switching components */
9768 		for (i = 0; i < I40E_MAX_VEB; i++)
9769 			if (pf->veb[i])
9770 				i40e_update_veb_stats(pf->veb[i]);
9771 	}
9772 
9773 	i40e_ptp_rx_hang(pf);
9774 	i40e_ptp_tx_hang(pf);
9775 }
9776 
9777 /**
9778  * i40e_reset_subtask - Set up for resetting the device and driver
9779  * @pf: board private structure
9780  **/
9781 static void i40e_reset_subtask(struct i40e_pf *pf)
9782 {
9783 	u32 reset_flags = 0;
9784 
9785 	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9786 		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9787 		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9788 	}
9789 	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9790 		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9791 		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9792 	}
9793 	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9794 		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9795 		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9796 	}
9797 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9798 		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9799 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9800 	}
9801 	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9802 		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9803 		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9804 	}
9805 
9806 	/* If there's a recovery already waiting, it takes
9807 	 * precedence over starting a new reset sequence.
9808 	 */
9809 	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9810 		i40e_prep_for_reset(pf);
9811 		i40e_reset(pf);
9812 		i40e_rebuild(pf, false, false);
9813 	}
9814 
9815 	/* If we're already down or resetting, just bail */
9816 	if (reset_flags &&
9817 	    !test_bit(__I40E_DOWN, pf->state) &&
9818 	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9819 		i40e_do_reset(pf, reset_flags, false);
9820 	}
9821 }
9822 
9823 /**
9824  * i40e_handle_link_event - Handle link event
9825  * @pf: board private structure
9826  * @e: event info posted on ARQ
9827  **/
9828 static void i40e_handle_link_event(struct i40e_pf *pf,
9829 				   struct i40e_arq_event_info *e)
9830 {
9831 	struct i40e_aqc_get_link_status *status =
9832 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9833 
9834 	/* Do a new status request to re-enable LSE reporting
9835 	 * and load new status information into the hw struct
9836 	 * This completely ignores any state information
9837 	 * in the ARQ event info, instead choosing to always
9838 	 * issue the AQ update link status command.
9839 	 */
9840 	i40e_link_event(pf);
9841 
9842 	/* Check if module meets thermal requirements */
9843 	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9844 		dev_err(&pf->pdev->dev,
9845 			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9846 		dev_err(&pf->pdev->dev,
9847 			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9848 	} else {
9849 		/* Check for an unqualified module; if the link is down,
9850 		 * suppress the message when the link was forced down.
9851 		 */
9852 		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9853 		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9854 		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
9855 		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9856 			dev_err(&pf->pdev->dev,
9857 				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9858 			dev_err(&pf->pdev->dev,
9859 				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9860 		}
9861 	}
9862 }
9863 
9864 /**
9865  * i40e_clean_adminq_subtask - Clean the AdminQ rings
9866  * @pf: board private structure
9867  **/
9868 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9869 {
9870 	struct i40e_arq_event_info event;
9871 	struct i40e_hw *hw = &pf->hw;
9872 	u16 pending, i = 0;
9873 	i40e_status ret;
9874 	u16 opcode;
9875 	u32 oldval;
9876 	u32 val;
9877 
9878 	/* Do not run clean AQ when PF reset fails */
9879 	if (test_bit(__I40E_RESET_FAILED, pf->state))
9880 		return;
9881 
9882 	/* check for error indications */
9883 	val = rd32(&pf->hw, pf->hw.aq.arq.len);
9884 	oldval = val;
9885 	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9886 		if (hw->debug_mask & I40E_DEBUG_AQ)
9887 			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9888 		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9889 	}
9890 	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9891 		if (hw->debug_mask & I40E_DEBUG_AQ)
9892 			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9893 		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9894 		pf->arq_overflows++;
9895 	}
9896 	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9897 		if (hw->debug_mask & I40E_DEBUG_AQ)
9898 			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9899 		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9900 	}
9901 	if (oldval != val)
9902 		wr32(&pf->hw, pf->hw.aq.arq.len, val);
9903 
9904 	val = rd32(&pf->hw, pf->hw.aq.asq.len);
9905 	oldval = val;
9906 	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9907 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9908 			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9909 		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9910 	}
9911 	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9912 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9913 			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9914 		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9915 	}
9916 	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9917 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9918 			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9919 		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9920 	}
9921 	if (oldval != val)
9922 		wr32(&pf->hw, pf->hw.aq.asq.len, val);
9923 
9924 	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9925 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9926 	if (!event.msg_buf)
9927 		return;
9928 
9929 	do {
9930 		ret = i40e_clean_arq_element(hw, &event, &pending);
9931 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9932 			break;
9933 		else if (ret) {
9934 			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9935 			break;
9936 		}
9937 
9938 		opcode = le16_to_cpu(event.desc.opcode);
9939 		switch (opcode) {
9940 
9941 		case i40e_aqc_opc_get_link_status:
9942 			rtnl_lock();
9943 			i40e_handle_link_event(pf, &event);
9944 			rtnl_unlock();
9945 			break;
9946 		case i40e_aqc_opc_send_msg_to_pf:
9947 			ret = i40e_vc_process_vf_msg(pf,
9948 					le16_to_cpu(event.desc.retval),
9949 					le32_to_cpu(event.desc.cookie_high),
9950 					le32_to_cpu(event.desc.cookie_low),
9951 					event.msg_buf,
9952 					event.msg_len);
9953 			break;
9954 		case i40e_aqc_opc_lldp_update_mib:
9955 			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9956 #ifdef CONFIG_I40E_DCB
9957 			rtnl_lock();
9958 			i40e_handle_lldp_event(pf, &event);
9959 			rtnl_unlock();
9960 #endif /* CONFIG_I40E_DCB */
9961 			break;
9962 		case i40e_aqc_opc_event_lan_overflow:
9963 			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9964 			i40e_handle_lan_overflow_event(pf, &event);
9965 			break;
9966 		case i40e_aqc_opc_send_msg_to_peer:
9967 			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9968 			break;
9969 		case i40e_aqc_opc_nvm_erase:
9970 		case i40e_aqc_opc_nvm_update:
9971 		case i40e_aqc_opc_oem_post_update:
9972 			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9973 				   "ARQ NVM operation 0x%04x completed\n",
9974 				   opcode);
9975 			break;
9976 		default:
9977 			dev_info(&pf->pdev->dev,
9978 				 "ARQ: Unknown event 0x%04x ignored\n",
9979 				 opcode);
9980 			break;
9981 		}
9982 	} while (i++ < pf->adminq_work_limit);
9983 
9984 	if (i < pf->adminq_work_limit)
9985 		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9986 
9987 	/* re-enable Admin queue interrupt cause */
9988 	val = rd32(hw, I40E_PFINT_ICR0_ENA);
9989 	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9990 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
9991 	i40e_flush(hw);
9992 
9993 	kfree(event.msg_buf);
9994 }
9995 
9996 /**
9997  * i40e_verify_eeprom - make sure eeprom is good to use
9998  * @pf: board private structure
9999  **/
10000 static void i40e_verify_eeprom(struct i40e_pf *pf)
10001 {
10002 	int err;
10003 
10004 	err = i40e_diag_eeprom_test(&pf->hw);
10005 	if (err) {
10006 		/* retry in case of garbage read */
10007 		err = i40e_diag_eeprom_test(&pf->hw);
10008 		if (err) {
10009 			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10010 				 err);
10011 			set_bit(__I40E_BAD_EEPROM, pf->state);
10012 		}
10013 	}
10014 
10015 	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10016 		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10017 		clear_bit(__I40E_BAD_EEPROM, pf->state);
10018 	}
10019 }
10020 
10021 /**
10022  * i40e_enable_pf_switch_lb - enable PF switch loopback
10023  * @pf: pointer to the PF structure
10024  *
10025  * enable switch loop back or die - no point in a return value
10026  **/
10027 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10028 {
10029 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10030 	struct i40e_vsi_context ctxt;
10031 	int ret;
10032 
10033 	ctxt.seid = pf->main_vsi_seid;
10034 	ctxt.pf_num = pf->hw.pf_id;
10035 	ctxt.vf_num = 0;
10036 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10037 	if (ret) {
10038 		dev_info(&pf->pdev->dev,
10039 			 "couldn't get PF vsi config, err %s aq_err %s\n",
10040 			 i40e_stat_str(&pf->hw, ret),
10041 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10042 		return;
10043 	}
10044 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10045 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10046 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10047 
10048 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10049 	if (ret) {
10050 		dev_info(&pf->pdev->dev,
10051 			 "update vsi switch failed, err %s aq_err %s\n",
10052 			 i40e_stat_str(&pf->hw, ret),
10053 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10054 	}
10055 }
10056 
10057 /**
10058  * i40e_disable_pf_switch_lb - disable PF switch loopback
10059  * @pf: pointer to the PF structure
10060  *
10061  * disable switch loop back or die - no point in a return value
10062  **/
10063 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10064 {
10065 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10066 	struct i40e_vsi_context ctxt;
10067 	int ret;
10068 
10069 	ctxt.seid = pf->main_vsi_seid;
10070 	ctxt.pf_num = pf->hw.pf_id;
10071 	ctxt.vf_num = 0;
10072 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10073 	if (ret) {
10074 		dev_info(&pf->pdev->dev,
10075 			 "couldn't get PF vsi config, err %s aq_err %s\n",
10076 			 i40e_stat_str(&pf->hw, ret),
10077 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10078 		return;
10079 	}
10080 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10081 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10082 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10083 
10084 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10085 	if (ret) {
10086 		dev_info(&pf->pdev->dev,
10087 			 "update vsi switch failed, err %s aq_err %s\n",
10088 			 i40e_stat_str(&pf->hw, ret),
10089 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10090 	}
10091 }
10092 
10093 /**
10094  * i40e_config_bridge_mode - Configure the HW bridge mode
10095  * @veb: pointer to the bridge instance
10096  *
10097  * Configure the loop back mode for the LAN VSI that is downlink to the
10098  * specified HW bridge instance. It is expected this function is called
10099  * when a new HW bridge is instantiated.
10100  **/
10101 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10102 {
10103 	struct i40e_pf *pf = veb->pf;
10104 
10105 	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10106 		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10107 			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10108 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10109 		i40e_disable_pf_switch_lb(pf);
10110 	else
10111 		i40e_enable_pf_switch_lb(pf);
10112 }
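
/* Usage sketch (illustrative; the device name is hypothetical): the bridge
 * mode acted on here is normally selected from userspace with iproute2, e.g.
 *   bridge link set dev eth0 hwmode vepa
 *   bridge link set dev eth0 hwmode veb
 * VEPA disables the PF switch loopback so local VM-to-VM traffic hairpins
 * through the adjacent switch, while VEB keeps switching local to the NIC.
 */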
10113 
10114 /**
10115  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10116  * @veb: pointer to the VEB instance
10117  *
10118  * This is a recursive function that first builds the attached VSIs then
10119  * recurses in to build the next layer of VEB.  We track the connections
10120  * through our own index numbers because the seid's from the HW could
10121  * change across the reset.
10122  **/
10123 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10124 {
10125 	struct i40e_vsi *ctl_vsi = NULL;
10126 	struct i40e_pf *pf = veb->pf;
10127 	int v, veb_idx;
10128 	int ret;
10129 
10130 	/* build VSI that owns this VEB, temporarily attached to base VEB */
10131 	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10132 		if (pf->vsi[v] &&
10133 		    pf->vsi[v]->veb_idx == veb->idx &&
10134 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10135 			ctl_vsi = pf->vsi[v];
10136 			break;
10137 		}
10138 	}
10139 	if (!ctl_vsi) {
10140 		dev_info(&pf->pdev->dev,
10141 			 "missing owner VSI for veb_idx %d\n", veb->idx);
10142 		ret = -ENOENT;
10143 		goto end_reconstitute;
10144 	}
10145 	if (ctl_vsi != pf->vsi[pf->lan_vsi])
10146 		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10147 	ret = i40e_add_vsi(ctl_vsi);
10148 	if (ret) {
10149 		dev_info(&pf->pdev->dev,
10150 			 "rebuild of veb_idx %d owner VSI failed: %d\n",
10151 			 veb->idx, ret);
10152 		goto end_reconstitute;
10153 	}
10154 	i40e_vsi_reset_stats(ctl_vsi);
10155 
10156 	/* create the VEB in the switch and move the VSI onto the VEB */
10157 	ret = i40e_add_veb(veb, ctl_vsi);
10158 	if (ret)
10159 		goto end_reconstitute;
10160 
10161 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10162 		veb->bridge_mode = BRIDGE_MODE_VEB;
10163 	else
10164 		veb->bridge_mode = BRIDGE_MODE_VEPA;
10165 	i40e_config_bridge_mode(veb);
10166 
10167 	/* create the remaining VSIs attached to this VEB */
10168 	for (v = 0; v < pf->num_alloc_vsi; v++) {
10169 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10170 			continue;
10171 
10172 		if (pf->vsi[v]->veb_idx == veb->idx) {
10173 			struct i40e_vsi *vsi = pf->vsi[v];
10174 
10175 			vsi->uplink_seid = veb->seid;
10176 			ret = i40e_add_vsi(vsi);
10177 			if (ret) {
10178 				dev_info(&pf->pdev->dev,
10179 					 "rebuild of vsi_idx %d failed: %d\n",
10180 					 v, ret);
10181 				goto end_reconstitute;
10182 			}
10183 			i40e_vsi_reset_stats(vsi);
10184 		}
10185 	}
10186 
10187 	/* create any VEBs attached to this VEB - RECURSION */
10188 	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10189 		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10190 			pf->veb[veb_idx]->uplink_seid = veb->seid;
10191 			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10192 			if (ret)
10193 				break;
10194 		}
10195 	}
10196 
10197 end_reconstitute:
10198 	return ret;
10199 }
10200 
10201 /**
10202  * i40e_get_capabilities - get info about the HW
10203  * @pf: the PF struct
10204  * @list_type: AQ capability to be queried
10205  **/
10206 static int i40e_get_capabilities(struct i40e_pf *pf,
10207 				 enum i40e_admin_queue_opc list_type)
10208 {
10209 	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10210 	u16 data_size;
10211 	int buf_len;
10212 	int err;
10213 
10214 	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10215 	do {
10216 		cap_buf = kzalloc(buf_len, GFP_KERNEL);
10217 		if (!cap_buf)
10218 			return -ENOMEM;
10219 
10220 		/* this loads the data into the hw struct for us */
10221 		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10222 						    &data_size, list_type,
10223 						    NULL);
10224 		/* data loaded, buffer no longer needed */
10225 		kfree(cap_buf);
10226 
10227 		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10228 			/* retry with a larger buffer */
10229 			buf_len = data_size;
10230 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10231 			dev_info(&pf->pdev->dev,
10232 				 "capability discovery failed, err %s aq_err %s\n",
10233 				 i40e_stat_str(&pf->hw, err),
10234 				 i40e_aq_str(&pf->hw,
10235 					     pf->hw.aq.asq_last_status));
10236 			return -ENODEV;
10237 		}
10238 	} while (err);
10239 
10240 	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10241 		if (list_type == i40e_aqc_opc_list_func_capabilities) {
10242 			dev_info(&pf->pdev->dev,
10243 				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10244 				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10245 				 pf->hw.func_caps.num_msix_vectors,
10246 				 pf->hw.func_caps.num_msix_vectors_vf,
10247 				 pf->hw.func_caps.fd_filters_guaranteed,
10248 				 pf->hw.func_caps.fd_filters_best_effort,
10249 				 pf->hw.func_caps.num_tx_qp,
10250 				 pf->hw.func_caps.num_vsis);
10251 		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10252 			dev_info(&pf->pdev->dev,
10253 				 "switch_mode=0x%04x, function_valid=0x%08x\n",
10254 				 pf->hw.dev_caps.switch_mode,
10255 				 pf->hw.dev_caps.valid_functions);
10256 			dev_info(&pf->pdev->dev,
10257 				 "SR-IOV=%d, num_vfs for all function=%u\n",
10258 				 pf->hw.dev_caps.sr_iov_1_1,
10259 				 pf->hw.dev_caps.num_vfs);
10260 			dev_info(&pf->pdev->dev,
10261 				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10262 				 pf->hw.dev_caps.num_vsis,
10263 				 pf->hw.dev_caps.num_rx_qp,
10264 				 pf->hw.dev_caps.num_tx_qp);
10265 		}
10266 	}
10267 	if (list_type == i40e_aqc_opc_list_func_capabilities) {
10268 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10269 		       + pf->hw.func_caps.num_vfs)
10270 		if (pf->hw.revision_id == 0 &&
10271 		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10272 			dev_info(&pf->pdev->dev,
10273 				 "got num_vsis %d, setting num_vsis to %d\n",
10274 				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10275 			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10276 		}
10277 	}
10278 	return 0;
10279 }
10280 
10281 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10282 
10283 /**
10284  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10285  * @pf: board private structure
10286  **/
10287 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10288 {
10289 	struct i40e_vsi *vsi;
10290 
10291 	/* quick workaround for an NVM issue that leaves a critical register
10292 	 * uninitialized
10293 	 */
10294 	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10295 		static const u32 hkey[] = {
10296 			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10297 			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10298 			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10299 			0x95b3a76d};
10300 		int i;
10301 
10302 		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10303 			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10304 	}
10305 
10306 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
10307 		return;
10308 
10309 	/* find existing VSI and see if it needs configuring */
10310 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10311 
10312 	/* create a new VSI if none exists */
10313 	if (!vsi) {
10314 		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10315 				     pf->vsi[pf->lan_vsi]->seid, 0);
10316 		if (!vsi) {
10317 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10318 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10319 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10320 			return;
10321 		}
10322 	}
10323 
10324 	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10325 }
10326 
10327 /**
10328  * i40e_fdir_teardown - release the Flow Director resources
10329  * @pf: board private structure
10330  **/
10331 static void i40e_fdir_teardown(struct i40e_pf *pf)
10332 {
10333 	struct i40e_vsi *vsi;
10334 
10335 	i40e_fdir_filter_exit(pf);
10336 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10337 	if (vsi)
10338 		i40e_vsi_release(vsi);
10339 }
10340 
10341 /**
10342  * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10343  * @vsi: PF main vsi
10344  * @seid: seid of main or channel VSIs
10345  *
10346  * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10347  * existed before reset
10348  **/
10349 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10350 {
10351 	struct i40e_cloud_filter *cfilter;
10352 	struct i40e_pf *pf = vsi->back;
10353 	struct hlist_node *node;
10354 	i40e_status ret;
10355 
10356 	/* Add cloud filters back if they exist */
10357 	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10358 				  cloud_node) {
10359 		if (cfilter->seid != seid)
10360 			continue;
10361 
10362 		if (cfilter->dst_port)
10363 			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10364 								true);
10365 		else
10366 			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10367 
10368 		if (ret) {
10369 			dev_dbg(&pf->pdev->dev,
10370 				"Failed to rebuild cloud filter, err %s aq_err %s\n",
10371 				i40e_stat_str(&pf->hw, ret),
10372 				i40e_aq_str(&pf->hw,
10373 					    pf->hw.aq.asq_last_status));
10374 			return ret;
10375 		}
10376 	}
10377 	return 0;
10378 }
10379 
10380 /**
10381  * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10382  * @vsi: PF main vsi
10383  *
10384  * Rebuilds channel VSIs if they existed before reset
10385  **/
10386 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10387 {
10388 	struct i40e_channel *ch, *ch_tmp;
10389 	i40e_status ret;
10390 
10391 	if (list_empty(&vsi->ch_list))
10392 		return 0;
10393 
10394 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10395 		if (!ch->initialized)
10396 			break;
10397 		/* Proceed with creation of channel (VMDq2) VSI */
10398 		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10399 		if (ret) {
10400 			dev_info(&vsi->back->pdev->dev,
10401 				 "failed to rebuild channels using uplink_seid %u\n",
10402 				 vsi->uplink_seid);
10403 			return ret;
10404 		}
10405 		/* Reconfigure TX queues using QTX_CTL register */
10406 		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10407 		if (ret) {
10408 			dev_info(&vsi->back->pdev->dev,
10409 				 "failed to configure TX rings for channel %u\n",
10410 				 ch->seid);
10411 			return ret;
10412 		}
10413 		/* update 'next_base_queue' */
10414 		vsi->next_base_queue = vsi->next_base_queue +
10415 							ch->num_queue_pairs;
10416 		if (ch->max_tx_rate) {
10417 			u64 credits = ch->max_tx_rate;
10418 
10419 			if (i40e_set_bw_limit(vsi, ch->seid,
10420 					      ch->max_tx_rate))
10421 				return -EINVAL;
10422 
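			/* express the rate as 50 Mbps credits for the debug
			 * message below
			 */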
10423 			do_div(credits, I40E_BW_CREDIT_DIVISOR);
10424 			dev_dbg(&vsi->back->pdev->dev,
10425 				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10426 				ch->max_tx_rate,
10427 				credits,
10428 				ch->seid);
10429 		}
10430 		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
10431 		if (ret) {
10432 			dev_dbg(&vsi->back->pdev->dev,
10433 				"Failed to rebuild cloud filters for channel VSI %u\n",
10434 				ch->seid);
10435 			return ret;
10436 		}
10437 	}
10438 	return 0;
10439 }
10440 
10441 /**
10442  * i40e_prep_for_reset - prep for the core to reset
10443  * @pf: board private structure
10444  *
10445  * Close up the VFs and other things in prep for PF Reset.
10446  **/
10447 static void i40e_prep_for_reset(struct i40e_pf *pf)
10448 {
10449 	struct i40e_hw *hw = &pf->hw;
10450 	i40e_status ret = 0;
10451 	u32 v;
10452 
10453 	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
10454 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10455 		return;
10456 	if (i40e_check_asq_alive(&pf->hw))
10457 		i40e_vc_notify_reset(pf);
10458 
10459 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
10460 
10461 	/* quiesce the VSIs and their queues that are not already DOWN */
10462 	i40e_pf_quiesce_all_vsi(pf);
10463 
10464 	for (v = 0; v < pf->num_alloc_vsi; v++) {
10465 		if (pf->vsi[v])
10466 			pf->vsi[v]->seid = 0;
10467 	}
10468 
10469 	i40e_shutdown_adminq(&pf->hw);
10470 
10471 	/* call shutdown HMC */
10472 	if (hw->hmc.hmc_obj) {
10473 		ret = i40e_shutdown_lan_hmc(hw);
10474 		if (ret)
10475 			dev_warn(&pf->pdev->dev,
10476 				 "shutdown_lan_hmc failed: %d\n", ret);
10477 	}
10478 
10479 	/* Save the current PTP time so that we can restore the time after the
10480 	 * reset completes.
10481 	 */
10482 	i40e_ptp_save_hw_time(pf);
10483 }
10484 
10485 /**
10486  * i40e_send_version - update firmware with driver version
10487  * @pf: PF struct
10488  */
10489 static void i40e_send_version(struct i40e_pf *pf)
10490 {
10491 	struct i40e_driver_version dv;
10492 
10493 	dv.major_version = 0xff;
10494 	dv.minor_version = 0xff;
10495 	dv.build_version = 0xff;
10496 	dv.subbuild_version = 0;
10497 	strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
10498 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
10499 }
10500 
10501 /**
10502  * i40e_get_oem_version - get OEM specific version information
10503  * @hw: pointer to the hardware structure
10504  **/
10505 static void i40e_get_oem_version(struct i40e_hw *hw)
10506 {
10507 	u16 block_offset = 0xffff;
10508 	u16 block_length = 0;
10509 	u16 capabilities = 0;
10510 	u16 gen_snap = 0;
10511 	u16 release = 0;
10512 
10513 #define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
10514 #define I40E_NVM_OEM_LENGTH_OFFSET		0x00
10515 #define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
10516 #define I40E_NVM_OEM_GEN_OFFSET			0x02
10517 #define I40E_NVM_OEM_RELEASE_OFFSET		0x03
10518 #define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
10519 #define I40E_NVM_OEM_LENGTH			3
10520 
10521 	/* Check if pointer to OEM version block is valid. */
10522 	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
10523 	if (block_offset == 0xffff)
10524 		return;
10525 
10526 	/* Check if OEM version block has correct length. */
10527 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
10528 			   &block_length);
10529 	if (block_length < I40E_NVM_OEM_LENGTH)
10530 		return;
10531 
10532 	/* Check if OEM version format is as expected. */
10533 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
10534 			   &capabilities);
10535 	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
10536 		return;
10537 
10538 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
10539 			   &gen_snap);
10540 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
10541 			   &release);
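	/* combine the gen/snap and release words into the reported OEM version */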
10542 	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
10543 	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
10544 }
10545 
10546 /**
10547  * i40e_reset - wait for a core reset to finish, then reset the PF if a CORER was not seen
10548  * @pf: board private structure
10549  **/
10550 static int i40e_reset(struct i40e_pf *pf)
10551 {
10552 	struct i40e_hw *hw = &pf->hw;
10553 	i40e_status ret;
10554 
10555 	ret = i40e_pf_reset(hw);
10556 	if (ret) {
10557 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
10558 		set_bit(__I40E_RESET_FAILED, pf->state);
10559 		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10560 	} else {
10561 		pf->pfr_count++;
10562 	}
10563 	return ret;
10564 }
10565 
10566 /**
10567  * i40e_rebuild - rebuild using a saved config
10568  * @pf: board private structure
10569  * @reinit: if the Main VSI needs to be re-initialized.
10570  * @lock_acquired: indicates whether or not the lock has been acquired
10571  * before this function was called.
10572  **/
10573 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10574 {
10575 	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
10576 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10577 	struct i40e_hw *hw = &pf->hw;
10578 	i40e_status ret;
10579 	u32 val;
10580 	int v;
10581 
10582 	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10583 	    i40e_check_recovery_mode(pf)) {
10584 		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10585 	}
10586 
10587 	if (test_bit(__I40E_DOWN, pf->state) &&
10588 	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
10589 	    !old_recovery_mode_bit)
10590 		goto clear_recovery;
10591 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10592 
10593 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
10594 	ret = i40e_init_adminq(&pf->hw);
10595 	if (ret) {
10596 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
10597 			 i40e_stat_str(&pf->hw, ret),
10598 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10599 		goto clear_recovery;
10600 	}
10601 	i40e_get_oem_version(&pf->hw);
10602 
10603 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
10604 		/* The following delay is necessary for firmware update. */
10605 		mdelay(1000);
10606 	}
10607 
10608 	/* re-verify the eeprom if we just had an EMP reset */
10609 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10610 		i40e_verify_eeprom(pf);
10611 
10612 	/* if we are going out of or into recovery mode we have to act
10613 	 * accordingly with regard to resource initialization
10614 	 * and deinitialization
10615 	 */
10616 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
10617 	    old_recovery_mode_bit) {
10618 		if (i40e_get_capabilities(pf,
10619 					  i40e_aqc_opc_list_func_capabilities))
10620 			goto end_unlock;
10621 
10622 		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10623 			/* we're staying in recovery mode so we'll reinitialize
10624 			 * misc vector here
10625 			 */
10626 			if (i40e_setup_misc_vector_for_recovery_mode(pf))
10627 				goto end_unlock;
10628 		} else {
10629 			if (!lock_acquired)
10630 				rtnl_lock();
10631 			/* we're going out of recovery mode so we'll free
10632 			 * the IRQ allocated specifically for recovery mode
10633 			 * and restore the interrupt scheme
10634 			 */
10635 			free_irq(pf->pdev->irq, pf);
10636 			i40e_clear_interrupt_scheme(pf);
10637 			if (i40e_restore_interrupt_scheme(pf))
10638 				goto end_unlock;
10639 		}
10640 
10641 		/* tell the firmware that we're starting */
10642 		i40e_send_version(pf);
10643 
10644 		/* bail out in case recovery mode was detected, as there is
10645 		 * no need for further configuration.
10646 		 */
10647 		goto end_unlock;
10648 	}
10649 
10650 	i40e_clear_pxe_mode(hw);
10651 	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10652 	if (ret)
10653 		goto end_core_reset;
10654 
10655 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10656 				hw->func_caps.num_rx_qp, 0, 0);
10657 	if (ret) {
10658 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10659 		goto end_core_reset;
10660 	}
10661 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10662 	if (ret) {
10663 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10664 		goto end_core_reset;
10665 	}
10666 
10667 #ifdef CONFIG_I40E_DCB
10668 	/* Enable FW to write a default DCB config on link-up
10669 	 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
10670 	 * is not supported with new link speed
10671 	 */
10672 	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
10673 		i40e_aq_set_dcb_parameters(hw, false, NULL);
10674 	} else {
10675 		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
10676 		    (hw->phy.link_info.link_speed &
10677 		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
10678 			i40e_aq_set_dcb_parameters(hw, false, NULL);
10679 			dev_warn(&pf->pdev->dev,
10680 				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
10681 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10682 		} else {
10683 			i40e_aq_set_dcb_parameters(hw, true, NULL);
10684 			ret = i40e_init_pf_dcb(pf);
10685 			if (ret) {
10686 				dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
10687 					 ret);
10688 				pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10689 				/* Continue without DCB enabled */
10690 			}
10691 		}
10692 	}
10693 
10694 #endif /* CONFIG_I40E_DCB */
10695 	if (!lock_acquired)
10696 		rtnl_lock();
10697 	ret = i40e_setup_pf_switch(pf, reinit, true);
10698 	if (ret)
10699 		goto end_unlock;
10700 
10701 	/* The driver only wants link up/down and module qualification
10702 	 * reports from firmware.  Note the negative logic.
10703 	 */
10704 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
10705 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
10706 					 I40E_AQ_EVENT_MEDIA_NA |
10707 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10708 	if (ret)
10709 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10710 			 i40e_stat_str(&pf->hw, ret),
10711 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10712 
10713 	/* Rebuild the VSIs and VEBs that existed before reset.
10714 	 * They are still in our local switch element arrays, so only
10715 	 * need to rebuild the switch model in the HW.
10716 	 *
10717 	 * If there were VEBs but the reconstitution failed, we'll try
10718 	 * to recover minimal use by getting the basic PF VSI working.
10719 	 */
10720 	if (vsi->uplink_seid != pf->mac_seid) {
10721 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10722 		/* find the one VEB connected to the MAC, and find orphans */
10723 		for (v = 0; v < I40E_MAX_VEB; v++) {
10724 			if (!pf->veb[v])
10725 				continue;
10726 
10727 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10728 			    pf->veb[v]->uplink_seid == 0) {
10729 				ret = i40e_reconstitute_veb(pf->veb[v]);
10730 
10731 				if (!ret)
10732 					continue;
10733 
10734 				/* If Main VEB failed, we're in deep doodoo,
10735 				 * so give up rebuilding the switch and set up
10736 				 * for minimal rebuild of PF VSI.
10737 				 * If orphan failed, we'll report the error
10738 				 * but try to keep going.
10739 				 */
10740 				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10741 					dev_info(&pf->pdev->dev,
10742 						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10743 						 ret);
10744 					vsi->uplink_seid = pf->mac_seid;
10745 					break;
10746 				} else if (pf->veb[v]->uplink_seid == 0) {
10747 					dev_info(&pf->pdev->dev,
10748 						 "rebuild of orphan VEB failed: %d\n",
10749 						 ret);
10750 				}
10751 			}
10752 		}
10753 	}
10754 
10755 	if (vsi->uplink_seid == pf->mac_seid) {
10756 		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10757 		/* no VEB, so rebuild only the Main VSI */
10758 		ret = i40e_add_vsi(vsi);
10759 		if (ret) {
10760 			dev_info(&pf->pdev->dev,
10761 				 "rebuild of Main VSI failed: %d\n", ret);
10762 			goto end_unlock;
10763 		}
10764 	}
10765 
10766 	if (vsi->mqprio_qopt.max_rate[0]) {
10767 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10768 		u64 credits = 0;
10769 
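		/* mqprio rates are given in bytes per second; convert to Mbps
		 * before programming the BW limit
		 */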
10770 		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10771 		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10772 		if (ret)
10773 			goto end_unlock;
10774 
10775 		credits = max_tx_rate;
10776 		do_div(credits, I40E_BW_CREDIT_DIVISOR);
10777 		dev_dbg(&vsi->back->pdev->dev,
10778 			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10779 			max_tx_rate,
10780 			credits,
10781 			vsi->seid);
10782 	}
10783 
10784 	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10785 	if (ret)
10786 		goto end_unlock;
10787 
10788 	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
10789 	 * for this main VSI if they exist
10790 	 */
10791 	ret = i40e_rebuild_channels(vsi);
10792 	if (ret)
10793 		goto end_unlock;
10794 
10795 	/* Reconfigure hardware for allowing smaller MSS in the case
10796 	 * of TSO, so that we avoid the MDD being fired and causing
10797 	 * a reset in the case of small MSS+TSO.
10798 	 */
10799 #define I40E_REG_MSS          0x000E64DC
10800 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
10801 #define I40E_64BYTE_MSS       0x400000
10802 	val = rd32(hw, I40E_REG_MSS);
10803 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10804 		val &= ~I40E_REG_MSS_MIN_MASK;
10805 		val |= I40E_64BYTE_MSS;
10806 		wr32(hw, I40E_REG_MSS, val);
10807 	}
10808 
10809 	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10810 		msleep(75);
10811 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10812 		if (ret)
10813 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10814 				 i40e_stat_str(&pf->hw, ret),
10815 				 i40e_aq_str(&pf->hw,
10816 					     pf->hw.aq.asq_last_status));
10817 	}
10818 	/* reinit the misc interrupt */
10819 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10820 		ret = i40e_setup_misc_vector(pf);
10821 
10822 	/* Add a filter that drops all Flow control frames from any VSI before
10823 	 * they can be transmitted. By doing so we stop a malicious VF from
10824 	 * sending out PAUSE or PFC frames and potentially controlling traffic
10825 	 * for other PF/VF VSIs.
10826 	 * The FW can still send Flow control frames if enabled.
10827 	 */
10828 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10829 						       pf->main_vsi_seid);
10830 
10831 	/* restart the VSIs that were rebuilt and running before the reset */
10832 	i40e_pf_unquiesce_all_vsi(pf);
10833 
10834 	/* Release the RTNL lock before we start resetting VFs */
10835 	if (!lock_acquired)
10836 		rtnl_unlock();
10837 
10838 	/* Restore promiscuous settings */
10839 	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10840 	if (ret)
10841 		dev_warn(&pf->pdev->dev,
10842 			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10843 			 pf->cur_promisc ? "on" : "off",
10844 			 i40e_stat_str(&pf->hw, ret),
10845 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10846 
10847 	i40e_reset_all_vfs(pf, true);
10848 
10849 	/* tell the firmware that we're starting */
10850 	i40e_send_version(pf);
10851 
10852 	/* We've already released the lock, so don't do it again */
10853 	goto end_core_reset;
10854 
10855 end_unlock:
10856 	if (!lock_acquired)
10857 		rtnl_unlock();
10858 end_core_reset:
10859 	clear_bit(__I40E_RESET_FAILED, pf->state);
10860 clear_recovery:
10861 	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10862 	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10863 }
10864 
10865 /**
10866  * i40e_reset_and_rebuild - reset and rebuild using a saved config
10867  * @pf: board private structure
10868  * @reinit: if the Main VSI needs to be re-initialized.
10869  * @lock_acquired: indicates whether or not the lock has been acquired
10870  * before this function was called.
10871  **/
10872 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10873 				   bool lock_acquired)
10874 {
10875 	int ret;
10876 
10877 	if (test_bit(__I40E_IN_REMOVE, pf->state))
10878 		return;
10879 	/* Now we wait for GRST to settle out.
10880 	 * We don't have to delete the VEBs or VSIs from the hw switch
10881 	 * because the reset will make them disappear.
10882 	 */
10883 	ret = i40e_reset(pf);
10884 	if (!ret)
10885 		i40e_rebuild(pf, reinit, lock_acquired);
10886 }
10887 
10888 /**
10889  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10890  * @pf: board private structure
10891  * @lock_acquired: indicates whether or not the lock has been acquired
10892  * before this function was called.
10893  *
10894  * Close up the VFs and other things in prep for a Core Reset,
10895  * then get ready to rebuild the world.
10896  **/
10897 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10898 {
10899 	i40e_prep_for_reset(pf);
10900 	i40e_reset_and_rebuild(pf, false, lock_acquired);
10901 }
10902 
10903 /**
10904  * i40e_handle_mdd_event - handle a Malicious Driver Detection (MDD) event
10905  * @pf: pointer to the PF structure
10906  *
10907  * Called from the service task to identify possibly malicious VFs
10908  **/
10909 static void i40e_handle_mdd_event(struct i40e_pf *pf)
10910 {
10911 	struct i40e_hw *hw = &pf->hw;
10912 	bool mdd_detected = false;
10913 	struct i40e_vf *vf;
10914 	u32 reg;
10915 	int i;
10916 
10917 	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10918 		return;
10919 
10920 	/* find what triggered the MDD event */
10921 	reg = rd32(hw, I40E_GL_MDET_TX);
10922 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10923 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10924 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
10925 		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10926 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
10927 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10928 				I40E_GL_MDET_TX_EVENT_SHIFT;
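		/* the register reports an absolute queue number; subtract the
		 * PF's base queue to get a PF-relative index
		 */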
10929 		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10930 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
10931 				pf->hw.func_caps.base_queue;
10932 		if (netif_msg_tx_err(pf))
10933 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10934 				 event, queue, pf_num, vf_num);
10935 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10936 		mdd_detected = true;
10937 	}
10938 	reg = rd32(hw, I40E_GL_MDET_RX);
10939 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10940 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10941 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
10942 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10943 				I40E_GL_MDET_RX_EVENT_SHIFT;
10944 		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10945 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
10946 				pf->hw.func_caps.base_queue;
10947 		if (netif_msg_rx_err(pf))
10948 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10949 				 event, queue, func);
10950 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10951 		mdd_detected = true;
10952 	}
10953 
10954 	if (mdd_detected) {
10955 		reg = rd32(hw, I40E_PF_MDET_TX);
10956 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10957 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10958 			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10959 		}
10960 		reg = rd32(hw, I40E_PF_MDET_RX);
10961 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10962 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10963 			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10964 		}
10965 	}
10966 
10967 	/* see if one of the VFs needs its hand slapped */
10968 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10969 		vf = &(pf->vf[i]);
10970 		reg = rd32(hw, I40E_VP_MDET_TX(i));
10971 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10972 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10973 			vf->num_mdd_events++;
10974 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10975 				 i);
10976 			dev_info(&pf->pdev->dev,
10977 				 "Use PF Control I/F to re-enable the VF\n");
10978 			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10979 		}
10980 
10981 		reg = rd32(hw, I40E_VP_MDET_RX(i));
10982 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10983 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10984 			vf->num_mdd_events++;
10985 			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10986 				 i);
10987 			dev_info(&pf->pdev->dev,
10988 				 "Use PF Control I/F to re-enable the VF\n");
10989 			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10990 		}
10991 	}
10992 
10993 	/* re-enable mdd interrupt cause */
10994 	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
10995 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
10996 	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
10997 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
10998 	i40e_flush(hw);
10999 }
11000 
11001 /**
11002  * i40e_service_task - Run the driver's async subtasks
11003  * @work: pointer to work_struct containing our data
11004  **/
11005 static void i40e_service_task(struct work_struct *work)
11006 {
11007 	struct i40e_pf *pf = container_of(work,
11008 					  struct i40e_pf,
11009 					  service_task);
11010 	unsigned long start_time = jiffies;
11011 
11012 	/* don't bother with service tasks if a reset is in progress */
11013 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
11014 	    test_bit(__I40E_SUSPENDED, pf->state))
11015 		return;
11016 
11017 	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
11018 		return;
11019 
11020 	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
11021 		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
11022 		i40e_sync_filters_subtask(pf);
11023 		i40e_reset_subtask(pf);
11024 		i40e_handle_mdd_event(pf);
11025 		i40e_vc_process_vflr_event(pf);
11026 		i40e_watchdog_subtask(pf);
11027 		i40e_fdir_reinit_subtask(pf);
11028 		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
11029 			/* Client subtask will reopen next time through. */
11030 			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
11031 							   true);
11032 		} else {
11033 			i40e_client_subtask(pf);
11034 			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
11035 					       pf->state))
11036 				i40e_notify_client_of_l2_param_changes(
11037 								pf->vsi[pf->lan_vsi]);
11038 		}
11039 		i40e_sync_filters_subtask(pf);
11040 	} else {
11041 		i40e_reset_subtask(pf);
11042 	}
11043 
11044 	i40e_clean_adminq_subtask(pf);
11045 
11046 	/* flush memory to make sure state is correct before next watchdog */
11047 	smp_mb__before_atomic();
11048 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
11049 
11050 	/* If the tasks have taken longer than one timer cycle or there
11051 	 * is more work to be done, reschedule the service task now
11052 	 * rather than wait for the timer to tick again.
11053 	 */
11054 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
11055 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state)		 ||
11056 	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state)		 ||
11057 	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
11058 		i40e_service_event_schedule(pf);
11059 }
11060 
11061 /**
11062  * i40e_service_timer - timer callback
11063  * @t: timer list pointer
11064  **/
11065 static void i40e_service_timer(struct timer_list *t)
11066 {
11067 	struct i40e_pf *pf = from_timer(pf, t, service_timer);
11068 
11069 	mod_timer(&pf->service_timer,
11070 		  round_jiffies(jiffies + pf->service_timer_period));
11071 	i40e_service_event_schedule(pf);
11072 }
11073 
11074 /**
11075  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
11076  * @vsi: the VSI being configured
11077  **/
11078 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
11079 {
11080 	struct i40e_pf *pf = vsi->back;
11081 
11082 	switch (vsi->type) {
11083 	case I40E_VSI_MAIN:
11084 		vsi->alloc_queue_pairs = pf->num_lan_qps;
11085 		if (!vsi->num_tx_desc)
11086 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11087 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11088 		if (!vsi->num_rx_desc)
11089 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11090 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11091 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11092 			vsi->num_q_vectors = pf->num_lan_msix;
11093 		else
11094 			vsi->num_q_vectors = 1;
11095 
11096 		break;
11097 
11098 	case I40E_VSI_FDIR:
11099 		vsi->alloc_queue_pairs = 1;
11100 		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11101 					 I40E_REQ_DESCRIPTOR_MULTIPLE);
11102 		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11103 					 I40E_REQ_DESCRIPTOR_MULTIPLE);
11104 		vsi->num_q_vectors = pf->num_fdsb_msix;
11105 		break;
11106 
11107 	case I40E_VSI_VMDQ2:
11108 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
11109 		if (!vsi->num_tx_desc)
11110 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11111 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11112 		if (!vsi->num_rx_desc)
11113 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11114 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11115 		vsi->num_q_vectors = pf->num_vmdq_msix;
11116 		break;
11117 
11118 	case I40E_VSI_SRIOV:
11119 		vsi->alloc_queue_pairs = pf->num_vf_qps;
11120 		if (!vsi->num_tx_desc)
11121 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11122 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11123 		if (!vsi->num_rx_desc)
11124 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11125 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11126 		break;
11127 
11128 	default:
11129 		WARN_ON(1);
11130 		return -ENODATA;
11131 	}
11132 
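	/* a kdump kernel has very little memory; fall back to the smallest
	 * supported ring sizes
	 */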
11133 	if (is_kdump_kernel()) {
11134 		vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
11135 		vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
11136 	}
11137 
11138 	return 0;
11139 }
11140 
11141 /**
11142  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11143  * @vsi: VSI pointer
11144  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
11145  *
11146  * On error: returns error code (negative)
11147  * On success: returns 0
11148  **/
11149 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
11150 {
11151 	struct i40e_ring **next_rings;
11152 	int size;
11153 	int ret = 0;
11154 
11155 	/* allocate memory for the Tx, XDP Tx and Rx ring pointers */
11156 	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
11157 	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
11158 	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
11159 	if (!vsi->tx_rings)
11160 		return -ENOMEM;
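	/* carve the single allocation up: tx_rings come first, then the
	 * optional xdp_rings, with rx_rings at the end
	 */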
11161 	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
11162 	if (i40e_enabled_xdp_vsi(vsi)) {
11163 		vsi->xdp_rings = next_rings;
11164 		next_rings += vsi->alloc_queue_pairs;
11165 	}
11166 	vsi->rx_rings = next_rings;
11167 
11168 	if (alloc_qvectors) {
11169 		/* allocate memory for q_vector pointers */
11170 		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
11171 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
11172 		if (!vsi->q_vectors) {
11173 			ret = -ENOMEM;
11174 			goto err_vectors;
11175 		}
11176 	}
11177 	return ret;
11178 
11179 err_vectors:
11180 	kfree(vsi->tx_rings);
11181 	return ret;
11182 }
11183 
11184 /**
11185  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11186  * @pf: board private structure
11187  * @type: type of VSI
11188  *
11189  * On error: returns error code (negative)
11190  * On success: returns vsi index in PF (positive)
11191  **/
11192 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
11193 {
11194 	int ret = -ENODEV;
11195 	struct i40e_vsi *vsi;
11196 	int vsi_idx;
11197 	int i;
11198 
11199 	/* Need to protect the allocation of the VSIs at the PF level */
11200 	mutex_lock(&pf->switch_mutex);
11201 
11202 	/* VSI list may be fragmented if VSI creation/destruction has
11203 	 * been happening.  We can afford to do a quick scan to look
11204 	 * for any free VSIs in the list.
11205 	 *
11206 	 * find next empty vsi slot, looping back around if necessary
11207 	 */
11208 	i = pf->next_vsi;
11209 	while (i < pf->num_alloc_vsi && pf->vsi[i])
11210 		i++;
11211 	if (i >= pf->num_alloc_vsi) {
11212 		i = 0;
11213 		while (i < pf->next_vsi && pf->vsi[i])
11214 			i++;
11215 	}
11216 
11217 	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
11218 		vsi_idx = i;             /* Found one! */
11219 	} else {
11220 		ret = -ENODEV;
11221 		goto unlock_pf;  /* out of VSI slots! */
11222 	}
11223 	pf->next_vsi = ++i;
11224 
11225 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
11226 	if (!vsi) {
11227 		ret = -ENOMEM;
11228 		goto unlock_pf;
11229 	}
11230 	vsi->type = type;
11231 	vsi->back = pf;
11232 	set_bit(__I40E_VSI_DOWN, vsi->state);
11233 	vsi->flags = 0;
11234 	vsi->idx = vsi_idx;
11235 	vsi->int_rate_limit = 0;
11236 	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
11237 				pf->rss_table_size : 64;
11238 	vsi->netdev_registered = false;
11239 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
11240 	hash_init(vsi->mac_filter_hash);
11241 	vsi->irqs_ready = false;
11242 
11243 	if (type == I40E_VSI_MAIN) {
11244 		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
11245 		if (!vsi->af_xdp_zc_qps)
11246 			goto err_rings;
11247 	}
11248 
11249 	ret = i40e_set_num_rings_in_vsi(vsi);
11250 	if (ret)
11251 		goto err_rings;
11252 
11253 	ret = i40e_vsi_alloc_arrays(vsi, true);
11254 	if (ret)
11255 		goto err_rings;
11256 
11257 	/* Setup default MSIX irq handler for VSI */
11258 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
11259 
11260 	/* Initialize VSI lock */
11261 	spin_lock_init(&vsi->mac_filter_hash_lock);
11262 	pf->vsi[vsi_idx] = vsi;
11263 	ret = vsi_idx;
11264 	goto unlock_pf;
11265 
11266 err_rings:
11267 	bitmap_free(vsi->af_xdp_zc_qps);
11268 	pf->next_vsi = i - 1;
11269 	kfree(vsi);
11270 unlock_pf:
11271 	mutex_unlock(&pf->switch_mutex);
11272 	return ret;
11273 }
11274 
11275 /**
11276  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
11277  * @vsi: VSI pointer
11278  * @free_qvectors: a bool to specify if q_vectors need to be freed.
11282  **/
11283 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
11284 {
11285 	/* free the ring and vector containers */
11286 	if (free_qvectors) {
11287 		kfree(vsi->q_vectors);
11288 		vsi->q_vectors = NULL;
11289 	}
11290 	kfree(vsi->tx_rings);
11291 	vsi->tx_rings = NULL;
11292 	vsi->rx_rings = NULL;
11293 	vsi->xdp_rings = NULL;
11294 }
11295 
11296 /**
11297  * i40e_clear_rss_config_user - clear the user configured RSS hash keys
11298  * and lookup table
11299  * @vsi: Pointer to VSI structure
11300  */
11301 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
11302 {
11303 	if (!vsi)
11304 		return;
11305 
11306 	kfree(vsi->rss_hkey_user);
11307 	vsi->rss_hkey_user = NULL;
11308 
11309 	kfree(vsi->rss_lut_user);
11310 	vsi->rss_lut_user = NULL;
11311 }
11312 
11313 /**
11314  * i40e_vsi_clear - Deallocate the VSI provided
11315  * @vsi: the VSI being un-configured
11316  **/
11317 static int i40e_vsi_clear(struct i40e_vsi *vsi)
11318 {
11319 	struct i40e_pf *pf;
11320 
11321 	if (!vsi)
11322 		return 0;
11323 
11324 	if (!vsi->back)
11325 		goto free_vsi;
11326 	pf = vsi->back;
11327 
11328 	mutex_lock(&pf->switch_mutex);
11329 	if (!pf->vsi[vsi->idx]) {
11330 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
11331 			vsi->idx, vsi->idx, vsi->type);
11332 		goto unlock_vsi;
11333 	}
11334 
11335 	if (pf->vsi[vsi->idx] != vsi) {
11336 		dev_err(&pf->pdev->dev,
11337 			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
11338 			pf->vsi[vsi->idx]->idx,
11339 			pf->vsi[vsi->idx]->type,
11340 			vsi->idx, vsi->type);
11341 		goto unlock_vsi;
11342 	}
11343 
11344 	/* updates the PF for this cleared vsi */
11345 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
11346 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
11347 
11348 	bitmap_free(vsi->af_xdp_zc_qps);
11349 	i40e_vsi_free_arrays(vsi, true);
11350 	i40e_clear_rss_config_user(vsi);
11351 
11352 	pf->vsi[vsi->idx] = NULL;
11353 	if (vsi->idx < pf->next_vsi)
11354 		pf->next_vsi = vsi->idx;
11355 
11356 unlock_vsi:
11357 	mutex_unlock(&pf->switch_mutex);
11358 free_vsi:
11359 	kfree(vsi);
11360 
11361 	return 0;
11362 }
11363 
11364 /**
11365  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
11366  * @vsi: the VSI being cleaned
11367  **/
11368 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
11369 {
11370 	int i;
11371 
11372 	if (vsi->tx_rings && vsi->tx_rings[0]) {
11373 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11374 			kfree_rcu(vsi->tx_rings[i], rcu);
11375 			WRITE_ONCE(vsi->tx_rings[i], NULL);
11376 			WRITE_ONCE(vsi->rx_rings[i], NULL);
11377 			if (vsi->xdp_rings)
11378 				WRITE_ONCE(vsi->xdp_rings[i], NULL);
11379 		}
11380 	}
11381 }
11382 
11383 /**
11384  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11385  * @vsi: the VSI being configured
11386  **/
11387 static int i40e_alloc_rings(struct i40e_vsi *vsi)
11388 {
11389 	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
11390 	struct i40e_pf *pf = vsi->back;
11391 	struct i40e_ring *ring;
11392 
11393 	/* Set basic values in the rings to be used later during open() */
11394 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11395 		/* allocate space for the Tx, optional XDP Tx, and Rx rings in one shot */
11396 		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
11397 		if (!ring)
11398 			goto err_out;
11399 
11400 		ring->queue_index = i;
11401 		ring->reg_idx = vsi->base_queue + i;
11402 		ring->ring_active = false;
11403 		ring->vsi = vsi;
11404 		ring->netdev = vsi->netdev;
11405 		ring->dev = &pf->pdev->dev;
11406 		ring->count = vsi->num_tx_desc;
11407 		ring->size = 0;
11408 		ring->dcb_tc = 0;
11409 		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11410 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11411 		ring->itr_setting = pf->tx_itr_default;
11412 		WRITE_ONCE(vsi->tx_rings[i], ring++);
11413 
11414 		if (!i40e_enabled_xdp_vsi(vsi))
11415 			goto setup_rx;
11416 
11417 		ring->queue_index = vsi->alloc_queue_pairs + i;
11418 		ring->reg_idx = vsi->base_queue + ring->queue_index;
11419 		ring->ring_active = false;
11420 		ring->vsi = vsi;
11421 		ring->netdev = NULL;
11422 		ring->dev = &pf->pdev->dev;
11423 		ring->count = vsi->num_tx_desc;
11424 		ring->size = 0;
11425 		ring->dcb_tc = 0;
11426 		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11427 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11428 		set_ring_xdp(ring);
11429 		ring->itr_setting = pf->tx_itr_default;
11430 		WRITE_ONCE(vsi->xdp_rings[i], ring++);
11431 
11432 setup_rx:
11433 		ring->queue_index = i;
11434 		ring->reg_idx = vsi->base_queue + i;
11435 		ring->ring_active = false;
11436 		ring->vsi = vsi;
11437 		ring->netdev = vsi->netdev;
11438 		ring->dev = &pf->pdev->dev;
11439 		ring->count = vsi->num_rx_desc;
11440 		ring->size = 0;
11441 		ring->dcb_tc = 0;
11442 		ring->itr_setting = pf->rx_itr_default;
11443 		WRITE_ONCE(vsi->rx_rings[i], ring);
11444 	}
11445 
11446 	return 0;
11447 
11448 err_out:
11449 	i40e_vsi_clear_rings(vsi);
11450 	return -ENOMEM;
11451 }
11452 
11453 /**
11454  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11455  * @pf: board private structure
11456  * @vectors: the number of MSI-X vectors to request
11457  *
11458  * Returns the number of vectors reserved, or error
11459  **/
11460 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
11461 {
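	/* pci_enable_msix_range() returns the number of vectors granted, at
	 * least I40E_MIN_MSIX, or a negative errno on failure
	 */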
11462 	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
11463 					I40E_MIN_MSIX, vectors);
11464 	if (vectors < 0) {
11465 		dev_info(&pf->pdev->dev,
11466 			 "MSI-X vector reservation failed: %d\n", vectors);
11467 		vectors = 0;
11468 	}
11469 
11470 	return vectors;
11471 }
11472 
11473 /**
11474  * i40e_init_msix - Setup the MSIX capability
11475  * @pf: board private structure
11476  *
11477  * Work with the OS to set up the MSIX vectors needed.
11478  *
11479  * Returns the number of vectors reserved or negative on failure
11480  **/
11481 static int i40e_init_msix(struct i40e_pf *pf)
11482 {
11483 	struct i40e_hw *hw = &pf->hw;
11484 	int cpus, extra_vectors;
11485 	int vectors_left;
11486 	int v_budget, i;
11487 	int v_actual;
11488 	int iwarp_requested = 0;
11489 
11490 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
11491 		return -ENODEV;
11492 
11493 	/* The number of vectors we'll request will be comprised of:
11494 	 *   - Add 1 for "other" cause for Admin Queue events, etc.
11495 	 *   - The number of LAN queue pairs
11496 	 *	- Queues being used for RSS.
11497 	 *		We don't need as many as max_rss_size vectors.
11498 	 *		Use rss_size instead in the calculation since that
11499 	 *		is governed by the number of CPUs in the system.
11500 	 *	- assumes symmetric Tx/Rx pairing
11501 	 *   - The number of VMDq pairs
11502 	 *   - The CPU count within the NUMA node if iWARP is enabled
11503 	 * Once we count this up, try the request.
11504 	 *
11505 	 * If we can't get what we want, we'll simplify to nearly nothing
11506 	 * and try again.  If that still fails, we punt.
11507 	 */
11508 	vectors_left = hw->func_caps.num_msix_vectors;
11509 	v_budget = 0;
11510 
11511 	/* reserve one vector for miscellaneous handler */
11512 	if (vectors_left) {
11513 		v_budget++;
11514 		vectors_left--;
11515 	}
11516 
11517 	/* reserve some vectors for the main PF traffic queues. Initially we
11518 	 * only reserve at most 50% of the available vectors, in the case that
11519 	 * the number of online CPUs is large. This ensures that we can enable
11520 	 * extra features as well. Once we've enabled the other features, we
11521 	 * will use any remaining vectors to reach as close as we can to the
11522 	 * number of online CPUs.
11523 	 */
11524 	cpus = num_online_cpus();
11525 	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11526 	vectors_left -= pf->num_lan_msix;
11527 
11528 	/* reserve one vector for sideband flow director */
11529 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11530 		if (vectors_left) {
11531 			pf->num_fdsb_msix = 1;
11532 			v_budget++;
11533 			vectors_left--;
11534 		} else {
11535 			pf->num_fdsb_msix = 0;
11536 		}
11537 	}
11538 
11539 	/* can we reserve enough for iWARP? */
11540 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11541 		iwarp_requested = pf->num_iwarp_msix;
11542 
11543 		if (!vectors_left)
11544 			pf->num_iwarp_msix = 0;
11545 		else if (vectors_left < pf->num_iwarp_msix)
11546 			pf->num_iwarp_msix = 1;
11547 		v_budget += pf->num_iwarp_msix;
11548 		vectors_left -= pf->num_iwarp_msix;
11549 	}
11550 
11551 	/* any vectors left over go for VMDq support */
11552 	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
11553 		if (!vectors_left) {
11554 			pf->num_vmdq_msix = 0;
11555 			pf->num_vmdq_qps = 0;
11556 		} else {
11557 			int vmdq_vecs_wanted =
11558 				pf->num_vmdq_vsis * pf->num_vmdq_qps;
11559 			int vmdq_vecs =
11560 				min_t(int, vectors_left, vmdq_vecs_wanted);
11561 
11562 			/* if we're short on vectors for what's desired, we limit
11563 			 * the queues per vmdq.  If this is still more than are
11564 			 * available, the user will need to change the number of
11565 			 * queues/vectors used by the PF later with the ethtool
11566 			 * channels command
11567 			 */
11568 			if (vectors_left < vmdq_vecs_wanted) {
11569 				pf->num_vmdq_qps = 1;
11570 				vmdq_vecs_wanted = pf->num_vmdq_vsis;
11571 				vmdq_vecs = min_t(int,
11572 						  vectors_left,
11573 						  vmdq_vecs_wanted);
11574 			}
11575 			pf->num_vmdq_msix = pf->num_vmdq_qps;
11576 
11577 			v_budget += vmdq_vecs;
11578 			vectors_left -= vmdq_vecs;
11579 		}
11580 	}
11581 
11582 	/* On systems with a large number of SMP cores, we previously limited
11583 	 * the number of vectors for num_lan_msix to be at most 50% of the
11584 	 * available vectors, to allow for other features. Now, we add back
11585 	 * the remaining vectors. However, we ensure that the total
11586 	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11587 	 * calculate the number of vectors we can add without going over the
11588 	 * cap of CPUs. For systems with a small number of CPUs this will be
11589 	 * zero.
11590 	 */
11591 	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11592 	pf->num_lan_msix += extra_vectors;
11593 	vectors_left -= extra_vectors;
11594 
11595 	WARN(vectors_left < 0,
11596 	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11597 
11598 	v_budget += pf->num_lan_msix;
11599 	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11600 				   GFP_KERNEL);
11601 	if (!pf->msix_entries)
11602 		return -ENOMEM;
11603 
11604 	for (i = 0; i < v_budget; i++)
11605 		pf->msix_entries[i].entry = i;
11606 	v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11607 
11608 	if (v_actual < I40E_MIN_MSIX) {
11609 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11610 		kfree(pf->msix_entries);
11611 		pf->msix_entries = NULL;
11612 		pci_disable_msix(pf->pdev);
11613 		return -ENODEV;
11614 
11615 	} else if (v_actual == I40E_MIN_MSIX) {
11616 		/* Adjust for minimal MSIX use */
11617 		pf->num_vmdq_vsis = 0;
11618 		pf->num_vmdq_qps = 0;
11619 		pf->num_lan_qps = 1;
11620 		pf->num_lan_msix = 1;
11621 
11622 	} else if (v_actual != v_budget) {
11623 		/* If we have limited resources, we will start with no vectors
11624 		 * for the special features and then allocate vectors to some
11625 		 * of these features based on the policy and at the end disable
11626 		 * the features that did not get any vectors.
11627 		 */
11628 		int vec;
11629 
11630 		dev_info(&pf->pdev->dev,
11631 			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11632 			 v_actual, v_budget);
11633 		/* reserve the misc vector */
11634 		vec = v_actual - 1;
11635 
11636 		/* Scale vector usage down */
11637 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
11638 		pf->num_vmdq_vsis = 1;
11639 		pf->num_vmdq_qps = 1;
11640 
11641 		/* partition out the remaining vectors */
11642 		switch (vec) {
11643 		case 2:
11644 			pf->num_lan_msix = 1;
11645 			break;
11646 		case 3:
11647 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11648 				pf->num_lan_msix = 1;
11649 				pf->num_iwarp_msix = 1;
11650 			} else {
11651 				pf->num_lan_msix = 2;
11652 			}
11653 			break;
11654 		default:
11655 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11656 				pf->num_iwarp_msix = min_t(int, (vec / 3),
11657 						 iwarp_requested);
11658 				pf->num_vmdq_vsis = min_t(int, (vec / 3),
11659 						  I40E_DEFAULT_NUM_VMDQ_VSI);
11660 			} else {
11661 				pf->num_vmdq_vsis = min_t(int, (vec / 2),
11662 						  I40E_DEFAULT_NUM_VMDQ_VSI);
11663 			}
11664 			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11665 				pf->num_fdsb_msix = 1;
11666 				vec--;
11667 			}
11668 			pf->num_lan_msix = min_t(int,
11669 			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11670 							      pf->num_lan_msix);
11671 			pf->num_lan_qps = pf->num_lan_msix;
11672 			break;
11673 		}
11674 	}
11675 
11676 	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11677 	    (pf->num_fdsb_msix == 0)) {
11678 		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11679 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11680 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11681 	}
11682 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11683 	    (pf->num_vmdq_msix == 0)) {
11684 		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11685 		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11686 	}
11687 
11688 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11689 	    (pf->num_iwarp_msix == 0)) {
11690 		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11691 		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11692 	}
11693 	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11694 		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11695 		   pf->num_lan_msix,
11696 		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
11697 		   pf->num_fdsb_msix,
11698 		   pf->num_iwarp_msix);
11699 
11700 	return v_actual;
11701 }
11702 
11703 /**
11704  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11705  * @vsi: the VSI being configured
11706  * @v_idx: index of the vector in the vsi struct
11707  *
11708  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
11709  **/
11710 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
11711 {
11712 	struct i40e_q_vector *q_vector;
11713 
11714 	/* allocate q_vector */
11715 	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11716 	if (!q_vector)
11717 		return -ENOMEM;
11718 
11719 	q_vector->vsi = vsi;
11720 	q_vector->v_idx = v_idx;
11721 	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11722 
11723 	if (vsi->netdev)
11724 		netif_napi_add(vsi->netdev, &q_vector->napi,
11725 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
11726 
11727 	/* tie q_vector and vsi together */
11728 	vsi->q_vectors[v_idx] = q_vector;
11729 
11730 	return 0;
11731 }
11732 
11733 /**
11734  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11735  * @vsi: the VSI being configured
11736  *
11737  * We allocate one q_vector per queue interrupt.  If allocation fails we
11738  * return -ENOMEM.
11739  **/
11740 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11741 {
11742 	struct i40e_pf *pf = vsi->back;
11743 	int err, v_idx, num_q_vectors;
11744 
11745 	/* if not MSIX, give the one vector only to the LAN VSI */
11746 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11747 		num_q_vectors = vsi->num_q_vectors;
11748 	else if (vsi == pf->vsi[pf->lan_vsi])
11749 		num_q_vectors = 1;
11750 	else
11751 		return -EINVAL;
11752 
11753 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11754 		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
11755 		if (err)
11756 			goto err_out;
11757 	}
11758 
11759 	return 0;
11760 
11761 err_out:
11762 	while (v_idx--)
11763 		i40e_free_q_vector(vsi, v_idx);
11764 
11765 	return err;
11766 }
11767 
11768 /**
11769  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11770  * @pf: board private structure to initialize
11771  **/
11772 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11773 {
11774 	int vectors = 0;
11775 	ssize_t size;
11776 
11777 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11778 		vectors = i40e_init_msix(pf);
11779 		if (vectors < 0) {
11780 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
11781 				       I40E_FLAG_IWARP_ENABLED	|
11782 				       I40E_FLAG_RSS_ENABLED	|
11783 				       I40E_FLAG_DCB_CAPABLE	|
11784 				       I40E_FLAG_DCB_ENABLED	|
11785 				       I40E_FLAG_SRIOV_ENABLED	|
11786 				       I40E_FLAG_FD_SB_ENABLED	|
11787 				       I40E_FLAG_FD_ATR_ENABLED	|
11788 				       I40E_FLAG_VMDQ_ENABLED);
11789 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11790 
11791 			/* rework the queue expectations without MSIX */
11792 			i40e_determine_queue_usage(pf);
11793 		}
11794 	}
11795 
11796 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11797 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11798 		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11799 		vectors = pci_enable_msi(pf->pdev);
11800 		if (vectors < 0) {
11801 			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11802 				 vectors);
11803 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11804 		}
11805 		vectors = 1;  /* one MSI or Legacy vector */
11806 	}
11807 
11808 	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11809 		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11810 
11811 	/* set up vector assignment tracking */
11812 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11813 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
11814 	if (!pf->irq_pile)
11815 		return -ENOMEM;
11816 
11817 	pf->irq_pile->num_entries = vectors;
11818 
11819 	/* track first vector for misc interrupts, ignore return */
11820 	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11821 
11822 	return 0;
11823 }
11824 
11825 /**
11826  * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11827  * @pf: private board data structure
11828  *
11829  * Restore the interrupt scheme that was cleared when we suspended the
11830  * device. This should be called during resume to re-allocate the q_vectors
11831  * and reacquire IRQs.
11832  */
11833 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11834 {
11835 	int err, i;
11836 
11837 	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
11838 	 * scheme. We need to re-enable them here in order to attempt to
11839 	 * re-acquire the MSI or MSI-X vectors.
11840 	 */
11841 	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11842 
11843 	err = i40e_init_interrupt_scheme(pf);
11844 	if (err)
11845 		return err;
11846 
11847 	/* Now that we've re-acquired IRQs, we need to remap the vectors and
11848 	 * rings together again.
11849 	 */
11850 	for (i = 0; i < pf->num_alloc_vsi; i++) {
11851 		if (pf->vsi[i]) {
11852 			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11853 			if (err)
11854 				goto err_unwind;
11855 			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11856 		}
11857 	}
11858 
11859 	err = i40e_setup_misc_vector(pf);
11860 	if (err)
11861 		goto err_unwind;
11862 
11863 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11864 		i40e_client_update_msix_info(pf);
11865 
11866 	return 0;
11867 
11868 err_unwind:
11869 	while (i--) {
11870 		if (pf->vsi[i])
11871 			i40e_vsi_free_q_vectors(pf->vsi[i]);
11872 	}
11873 
11874 	return err;
11875 }
11876 
11877 /**
11878  * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11879  * non queue events in recovery mode
11880  * @pf: board private structure
11881  *
11882  * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11883  * the non-queue interrupts, e.g. AdminQ and errors, while in recovery mode.
11884  * This differs from the normal (non-recovery) path since no Tx/Rx resources
11885  * are being allocated here.
11886  **/
11887 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11888 {
11889 	int err;
11890 
11891 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11892 		err = i40e_setup_misc_vector(pf);
11893 
11894 		if (err) {
11895 			dev_info(&pf->pdev->dev,
11896 				 "MSI-X misc vector request failed, error %d\n",
11897 				 err);
11898 			return err;
11899 		}
11900 	} else {
11901 		u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11902 
11903 		err = request_irq(pf->pdev->irq, i40e_intr, flags,
11904 				  pf->int_name, pf);
11905 
11906 		if (err) {
11907 			dev_info(&pf->pdev->dev,
11908 				 "MSI/legacy misc vector request failed, error %d\n",
11909 				 err);
11910 			return err;
11911 		}
11912 		i40e_enable_misc_int_causes(pf);
11913 		i40e_irq_dynamic_enable_icr0(pf);
11914 	}
11915 
11916 	return 0;
11917 }
11918 
11919 /**
11920  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11921  * @pf: board private structure
11922  *
11923  * This sets up the handler for MSIX 0, which is used to manage the
11924  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
11925  * when in MSI or Legacy interrupt mode.
11926  **/
11927 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11928 {
11929 	struct i40e_hw *hw = &pf->hw;
11930 	int err = 0;
11931 
11932 	/* Only request the IRQ once, the first time through. */
11933 	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11934 		err = request_irq(pf->msix_entries[0].vector,
11935 				  i40e_intr, 0, pf->int_name, pf);
11936 		if (err) {
11937 			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11938 			dev_info(&pf->pdev->dev,
11939 				 "request_irq for %s failed: %d\n",
11940 				 pf->int_name, err);
11941 			return -EFAULT;
11942 		}
11943 	}
11944 
11945 	i40e_enable_misc_int_causes(pf);
11946 
11947 	/* associate no queues to the misc vector */
11948 	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11949 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
11950 
11951 	i40e_flush(hw);
11952 
11953 	i40e_irq_dynamic_enable_icr0(pf);
11954 
11955 	return err;
11956 }
11957 
11958 /**
11959  * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11960  * @vsi: Pointer to vsi structure
11961  * @seed: Buffer to store the hash keys
11962  * @lut: Buffer to store the lookup table entries
11963  * @lut_size: Size of buffer to store the lookup table entries
11964  *
11965  * Return 0 on success, negative on failure
11966  */
11967 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11968 			   u8 *lut, u16 lut_size)
11969 {
11970 	struct i40e_pf *pf = vsi->back;
11971 	struct i40e_hw *hw = &pf->hw;
11972 	int ret = 0;
11973 
11974 	if (seed) {
11975 		ret = i40e_aq_get_rss_key(hw, vsi->id,
11976 			(struct i40e_aqc_get_set_rss_key_data *)seed);
11977 		if (ret) {
11978 			dev_info(&pf->pdev->dev,
11979 				 "Cannot get RSS key, err %s aq_err %s\n",
11980 				 i40e_stat_str(&pf->hw, ret),
11981 				 i40e_aq_str(&pf->hw,
11982 					     pf->hw.aq.asq_last_status));
11983 			return ret;
11984 		}
11985 	}
11986 
11987 	if (lut) {
11988 		bool pf_lut = vsi->type == I40E_VSI_MAIN;
11989 
11990 		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11991 		if (ret) {
11992 			dev_info(&pf->pdev->dev,
11993 				 "Cannot get RSS lut, err %s aq_err %s\n",
11994 				 i40e_stat_str(&pf->hw, ret),
11995 				 i40e_aq_str(&pf->hw,
11996 					     pf->hw.aq.asq_last_status));
11997 			return ret;
11998 		}
11999 	}
12000 
12001 	return ret;
12002 }
12003 
12004 /**
12005  * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
12006  * @vsi: Pointer to vsi structure
12007  * @seed: RSS hash seed
12008  * @lut: Lookup table
12009  * @lut_size: Lookup table size
12010  *
12011  * Returns 0 on success, negative on failure
12012  **/
12013 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
12014 			       const u8 *lut, u16 lut_size)
12015 {
12016 	struct i40e_pf *pf = vsi->back;
12017 	struct i40e_hw *hw = &pf->hw;
12018 	u16 vf_id = vsi->vf_id;
12019 	u8 i;
12020 
12021 	/* Fill out hash function seed */
12022 	if (seed) {
12023 		u32 *seed_dw = (u32 *)seed;
12024 
12025 		if (vsi->type == I40E_VSI_MAIN) {
12026 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12027 				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
12028 		} else if (vsi->type == I40E_VSI_SRIOV) {
12029 			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
12030 				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
12031 		} else {
12032 			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
12033 		}
12034 	}
12035 
12036 	if (lut) {
12037 		u32 *lut_dw = (u32 *)lut;
12038 
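		/* The LUT is an array of 8-bit queue indices; each 32-bit
		 * HLUT register therefore carries four consecutive entries.
		 */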
12039 		if (vsi->type == I40E_VSI_MAIN) {
12040 			if (lut_size != I40E_HLUT_ARRAY_SIZE)
12041 				return -EINVAL;
12042 			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12043 				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
12044 		} else if (vsi->type == I40E_VSI_SRIOV) {
12045 			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
12046 				return -EINVAL;
12047 			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12048 				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
12049 		} else {
12050 			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12051 		}
12052 	}
12053 	i40e_flush(hw);
12054 
12055 	return 0;
12056 }
12057 
12058 /**
12059  * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
12060  * @vsi: Pointer to VSI structure
12061  * @seed: Buffer to store the keys
12062  * @lut: Buffer to store the lookup table entries
12063  * @lut_size: Size of buffer to store the lookup table entries
12064  *
12065  * Returns 0 on success, negative on failure
12066  */
12067 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
12068 			    u8 *lut, u16 lut_size)
12069 {
12070 	struct i40e_pf *pf = vsi->back;
12071 	struct i40e_hw *hw = &pf->hw;
12072 	u16 i;
12073 
12074 	if (seed) {
12075 		u32 *seed_dw = (u32 *)seed;
12076 
12077 		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12078 			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
12079 	}
12080 	if (lut) {
12081 		u32 *lut_dw = (u32 *)lut;
12082 
12083 		if (lut_size != I40E_HLUT_ARRAY_SIZE)
12084 			return -EINVAL;
12085 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12086 			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
12087 	}
12088 
12089 	return 0;
12090 }
12091 
12092 /**
12093  * i40e_config_rss - Configure RSS keys and lut
12094  * @vsi: Pointer to VSI structure
12095  * @seed: RSS hash seed
12096  * @lut: Lookup table
12097  * @lut_size: Lookup table size
12098  *
12099  * Returns 0 on success, negative on failure
12100  */
12101 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12102 {
12103 	struct i40e_pf *pf = vsi->back;
12104 
12105 	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12106 		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
12107 	else
12108 		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
12109 }
12110 
12111 /**
12112  * i40e_get_rss - Get RSS keys and lut
12113  * @vsi: Pointer to VSI structure
12114  * @seed: Buffer to store the keys
12115  * @lut: Buffer to store the lookup table entries
12116  * @lut_size: Size of buffer to store the lookup table entries
12117  *
12118  * Returns 0 on success, negative on failure
12119  */
12120 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12121 {
12122 	struct i40e_pf *pf = vsi->back;
12123 
12124 	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12125 		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
12126 	else
12127 		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
12128 }
12129 
12130 /**
12131  * i40e_fill_rss_lut - Fill the RSS lookup table with default values
12132  * @pf: Pointer to board private structure
12133  * @lut: Lookup table
12134  * @rss_table_size: Lookup table size
12135  * @rss_size: Number of queues over which to spread the hash
12136  */
12137 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
12138 		       u16 rss_table_size, u16 rss_size)
12139 {
12140 	u16 i;
12141 
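	/* Spread the queues round-robin across the table: with rss_size of
	 * 4, for example, the LUT becomes 0, 1, 2, 3, 0, 1, 2, 3, ...
	 */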
12142 	for (i = 0; i < rss_table_size; i++)
12143 		lut[i] = i % rss_size;
12144 }
12145 
12146 /**
12147  * i40e_pf_config_rss - Prepare for RSS if used
12148  * @pf: board private structure
12149  **/
12150 static int i40e_pf_config_rss(struct i40e_pf *pf)
12151 {
12152 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12153 	u8 seed[I40E_HKEY_ARRAY_SIZE];
12154 	u8 *lut;
12155 	struct i40e_hw *hw = &pf->hw;
12156 	u32 reg_val;
12157 	u64 hena;
12158 	int ret;
12159 
12160 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
12161 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12162 		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12163 	hena |= i40e_pf_get_default_rss_hena(pf);
12164 
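	/* The hash-enable mask is 64 bits wide but lives in two 32-bit
	 * registers, so write it back as low and high halves.
	 */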
12165 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12166 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
12167 
12168 	/* Determine the RSS table size based on the hardware capabilities */
12169 	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12170 	reg_val = (pf->rss_table_size == 512) ?
12171 			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12172 			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12173 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12174 
12175 	/* Determine the RSS size of the VSI */
12176 	if (!vsi->rss_size) {
12177 		u16 qcount;
12178 		/* If the firmware does something weird during VSI init, we
12179 		 * could end up with zero TCs. Check for that to avoid
12180 		 * divide-by-zero. It probably won't pass traffic, but it also
12181 		 * won't panic.
12182 		 */
12183 		qcount = vsi->num_queue_pairs /
12184 			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
12185 		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12186 	}
12187 	if (!vsi->rss_size)
12188 		return -EINVAL;
12189 
12190 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
12191 	if (!lut)
12192 		return -ENOMEM;
12193 
12194 	/* Use user configured lut if there is one, otherwise use default */
12195 	if (vsi->rss_lut_user)
12196 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
12197 	else
12198 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
12199 
12200 	/* Use user configured hash key if there is one, otherwise
12201 	 * use default.
12202 	 */
12203 	if (vsi->rss_hkey_user)
12204 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
12205 	else
12206 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
12207 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
12208 	kfree(lut);
12209 
12210 	return ret;
12211 }
12212 
12213 /**
12214  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12215  * @pf: board private structure
12216  * @queue_count: the requested queue count for rss.
12217  *
12218  * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
12219  * count, which may differ from the requested queue count.
12220  * Note: expects to be called while under rtnl_lock()
12221  **/
12222 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12223 {
12224 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12225 	int new_rss_size;
12226 
12227 	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
12228 		return 0;
12229 
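	/* Never ask for more queues than there are online CPUs or than the
	 * hardware RSS table can address.
	 */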
12230 	queue_count = min_t(int, queue_count, num_online_cpus());
12231 	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12232 
12233 	if (queue_count != vsi->num_queue_pairs) {
12234 		u16 qcount;
12235 
12236 		vsi->req_queue_pairs = queue_count;
12237 		i40e_prep_for_reset(pf);
12238 		if (test_bit(__I40E_IN_REMOVE, pf->state))
12239 			return pf->alloc_rss_size;
12240 
12241 		pf->alloc_rss_size = new_rss_size;
12242 
12243 		i40e_reset_and_rebuild(pf, true, true);
12244 
12245 		/* Discard the user configured hash keys and lut, if less
12246 		 * queues are enabled.
12247 		 */
12248 		if (queue_count < vsi->rss_size) {
12249 			i40e_clear_rss_config_user(vsi);
12250 			dev_dbg(&pf->pdev->dev,
12251 				"discard user configured hash keys and lut\n");
12252 		}
12253 
12254 		/* Reset vsi->rss_size, as number of enabled queues changed */
12255 		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12256 		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12257 
12258 		i40e_pf_config_rss(pf);
12259 	}
12260 	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count:  %d/%d\n",
12261 		 vsi->req_queue_pairs, pf->rss_size_max);
12262 	return pf->alloc_rss_size;
12263 }
12264 
12265 /**
12266  * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12267  * @pf: board private structure
12268  **/
12269 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
12270 {
12271 	i40e_status status;
12272 	bool min_valid, max_valid;
12273 	u32 max_bw, min_bw;
12274 
12275 	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
12276 					   &min_valid, &max_valid);
12277 
12278 	if (!status) {
12279 		if (min_valid)
12280 			pf->min_bw = min_bw;
12281 		if (max_valid)
12282 			pf->max_bw = max_bw;
12283 	}
12284 
12285 	return status;
12286 }
12287 
12288 /**
12289  * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12290  * @pf: board private structure
12291  **/
12292 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
12293 {
12294 	struct i40e_aqc_configure_partition_bw_data bw_data;
12295 	i40e_status status;
12296 
12297 	memset(&bw_data, 0, sizeof(bw_data));
12298 
12299 	/* Set the valid bit for this PF */
12300 	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12301 	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12302 	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
12303 
12304 	/* Set the new bandwidths */
12305 	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
12306 
12307 	return status;
12308 }
12309 
12310 /**
12311  * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12312  * @pf: board private structure
12313  **/
12314 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12315 {
12316 	/* Commit temporary BW setting to permanent NVM image */
12317 	enum i40e_admin_queue_err last_aq_status;
12318 	i40e_status ret;
12319 	u16 nvm_word;
12320 
12321 	if (pf->hw.partition_id != 1) {
12322 		dev_info(&pf->pdev->dev,
12323 			 "Commit BW only works on partition 1! This is partition %d",
12324 			 pf->hw.partition_id);
12325 		ret = I40E_NOT_SUPPORTED;
12326 		goto bw_commit_out;
12327 	}
12328 
12329 	/* Acquire NVM for read access */
12330 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
12331 	last_aq_status = pf->hw.aq.asq_last_status;
12332 	if (ret) {
12333 		dev_info(&pf->pdev->dev,
12334 			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
12335 			 i40e_stat_str(&pf->hw, ret),
12336 			 i40e_aq_str(&pf->hw, last_aq_status));
12337 		goto bw_commit_out;
12338 	}
12339 
12340 	/* Read word 0x10 of NVM - SW compatibility word 1 */
12341 	ret = i40e_aq_read_nvm(&pf->hw,
12342 			       I40E_SR_NVM_CONTROL_WORD,
12343 			       0x10, sizeof(nvm_word), &nvm_word,
12344 			       false, NULL);
12345 	/* Save off last admin queue command status before releasing
12346 	 * the NVM
12347 	 */
12348 	last_aq_status = pf->hw.aq.asq_last_status;
12349 	i40e_release_nvm(&pf->hw);
12350 	if (ret) {
12351 		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
12352 			 i40e_stat_str(&pf->hw, ret),
12353 			 i40e_aq_str(&pf->hw, last_aq_status));
12354 		goto bw_commit_out;
12355 	}
12356 
12357 	/* Wait a bit for NVM release to complete */
12358 	msleep(50);
12359 
12360 	/* Acquire NVM for write access */
12361 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
12362 	last_aq_status = pf->hw.aq.asq_last_status;
12363 	if (ret) {
12364 		dev_info(&pf->pdev->dev,
12365 			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
12366 			 i40e_stat_str(&pf->hw, ret),
12367 			 i40e_aq_str(&pf->hw, last_aq_status));
12368 		goto bw_commit_out;
12369 	}
12370 	/* Write it back out unchanged to initiate update NVM,
12371 	 * which will force a write of the shadow (alt) RAM to
12372 	 * the NVM - thus storing the bandwidth values permanently.
12373 	 */
12374 	ret = i40e_aq_update_nvm(&pf->hw,
12375 				 I40E_SR_NVM_CONTROL_WORD,
12376 				 0x10, sizeof(nvm_word),
12377 				 &nvm_word, true, 0, NULL);
12378 	/* Save off last admin queue command status before releasing
12379 	 * the NVM
12380 	 */
12381 	last_aq_status = pf->hw.aq.asq_last_status;
12382 	i40e_release_nvm(&pf->hw);
12383 	if (ret)
12384 		dev_info(&pf->pdev->dev,
12385 			 "BW settings NOT SAVED, err %s aq_err %s\n",
12386 			 i40e_stat_str(&pf->hw, ret),
12387 			 i40e_aq_str(&pf->hw, last_aq_status));
12388 bw_commit_out:
12389 
12390 	return ret;
12391 }
12392 
12393 /**
12394  * i40e_is_total_port_shutdown_enabled - read NVM and return true if the
12395  * total port shutdown feature is enabled for this PF
12396  * @pf: board private structure
12397  **/
12398 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12399 {
12400 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(4)
12401 #define I40E_FEATURES_ENABLE_PTR		0x2A
12402 #define I40E_CURRENT_SETTING_PTR		0x2B
12403 #define I40E_LINK_BEHAVIOR_WORD_OFFSET		0x2D
12404 #define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
12405 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
12406 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
12407 	i40e_status read_status = I40E_SUCCESS;
12408 	u16 sr_emp_sr_settings_ptr = 0;
12409 	u16 features_enable = 0;
12410 	u16 link_behavior = 0;
12411 	bool ret = false;
12412 
12413 	read_status = i40e_read_nvm_word(&pf->hw,
12414 					 I40E_SR_EMP_SR_SETTINGS_PTR,
12415 					 &sr_emp_sr_settings_ptr);
12416 	if (read_status)
12417 		goto err_nvm;
12418 	read_status = i40e_read_nvm_word(&pf->hw,
12419 					 sr_emp_sr_settings_ptr +
12420 					 I40E_FEATURES_ENABLE_PTR,
12421 					 &features_enable);
12422 	if (read_status)
12423 		goto err_nvm;
12424 	if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12425 		read_status = i40e_read_nvm_module_data(&pf->hw,
12426 							I40E_SR_EMP_SR_SETTINGS_PTR,
12427 							I40E_CURRENT_SETTING_PTR,
12428 							I40E_LINK_BEHAVIOR_WORD_OFFSET,
12429 							I40E_LINK_BEHAVIOR_WORD_LENGTH,
12430 							&link_behavior);
12431 		if (read_status)
12432 			goto err_nvm;
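		/* Each port owns I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH bits of
		 * the link behavior word; shift this port's bits down and
		 * test the OS-forced-enable flag.
		 */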
12433 		link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12434 		ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
12435 	}
12436 	return ret;
12437 
12438 err_nvm:
12439 	dev_warn(&pf->pdev->dev,
12440 		 "total-port-shutdown feature is off due to read nvm error: %s\n",
12441 		 i40e_stat_str(&pf->hw, read_status));
12442 	return ret;
12443 }
12444 
12445 /**
12446  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12447  * @pf: board private structure to initialize
12448  *
12449  * i40e_sw_init initializes the Adapter private data structure.
12450  * Fields are initialized based on PCI device information and
12451  * OS network device settings (MTU size).
12452  **/
12453 static int i40e_sw_init(struct i40e_pf *pf)
12454 {
12455 	int err = 0;
12456 	int size;
12457 	u16 pow;
12458 
12459 	/* Set default capability flags */
12460 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12461 		    I40E_FLAG_MSI_ENABLED     |
12462 		    I40E_FLAG_MSIX_ENABLED;
12463 
12464 	/* Set default ITR */
12465 	pf->rx_itr_default = I40E_ITR_RX_DEF;
12466 	pf->tx_itr_default = I40E_ITR_TX_DEF;
12467 
12468 	/* Depending on PF configurations, it is possible that the RSS
12469 	 * maximum might end up larger than the available queues
12470 	 */
12471 	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12472 	pf->alloc_rss_size = 1;
12473 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12474 	pf->rss_size_max = min_t(int, pf->rss_size_max,
12475 				 pf->hw.func_caps.num_tx_qp);
12476 
12477 	/* find the next higher power-of-2 of num cpus */
12478 	pow = roundup_pow_of_two(num_online_cpus());
12479 	pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
12480 
12481 	if (pf->hw.func_caps.rss) {
12482 		pf->flags |= I40E_FLAG_RSS_ENABLED;
12483 		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12484 					   num_online_cpus());
12485 	}
12486 
12487 	/* MFP mode enabled */
12488 	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12489 		pf->flags |= I40E_FLAG_MFP_ENABLED;
12490 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12491 		if (i40e_get_partition_bw_setting(pf)) {
12492 			dev_warn(&pf->pdev->dev,
12493 				 "Could not get partition bw settings\n");
12494 		} else {
12495 			dev_info(&pf->pdev->dev,
12496 				 "Partition BW Min = %8.8x, Max = %8.8x\n",
12497 				 pf->min_bw, pf->max_bw);
12498 
12499 			/* nudge the Tx scheduler */
12500 			i40e_set_partition_bw_setting(pf);
12501 		}
12502 	}
12503 
12504 	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12505 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12506 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12507 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12508 		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12509 		    pf->hw.num_partitions > 1)
12510 			dev_info(&pf->pdev->dev,
12511 				 "Flow Director Sideband mode Disabled in MFP mode\n");
12512 		else
12513 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12514 		pf->fdir_pf_filter_count =
12515 				 pf->hw.func_caps.fd_filters_guaranteed;
12516 		pf->hw.fdir_shared_filter_count =
12517 				 pf->hw.func_caps.fd_filters_best_effort;
12518 	}
12519 
12520 	if (pf->hw.mac.type == I40E_MAC_X722) {
12521 		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12522 				    I40E_HW_128_QP_RSS_CAPABLE |
12523 				    I40E_HW_ATR_EVICT_CAPABLE |
12524 				    I40E_HW_WB_ON_ITR_CAPABLE |
12525 				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12526 				    I40E_HW_NO_PCI_LINK_CHECK |
12527 				    I40E_HW_USE_SET_LLDP_MIB |
12528 				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12529 				    I40E_HW_PTP_L4_CAPABLE |
12530 				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12531 				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12532 
12533 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12534 		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12535 		    I40E_FDEVICT_PCTYPE_DEFAULT) {
12536 			dev_warn(&pf->pdev->dev,
12537 				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12538 			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12539 		}
12540 	} else if ((pf->hw.aq.api_maj_ver > 1) ||
12541 		   ((pf->hw.aq.api_maj_ver == 1) &&
12542 		    (pf->hw.aq.api_min_ver > 4))) {
12543 		/* Supported in FW API version higher than 1.4 */
12544 		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12545 	}
12546 
12547 	/* Enable HW ATR eviction if possible */
12548 	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12549 		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12550 
12551 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12552 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12553 	    (pf->hw.aq.fw_maj_ver < 4))) {
12554 		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12555 		/* No DCB support  for FW < v4.33 */
12556 		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12557 	}
12558 
12559 	/* Disable FW LLDP if FW < v4.3 */
12560 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12561 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12562 	    (pf->hw.aq.fw_maj_ver < 4)))
12563 		pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12564 
12565 	/* Use the FW Set LLDP MIB API if FW > v4.40 */
12566 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12567 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12568 	    (pf->hw.aq.fw_maj_ver >= 5)))
12569 		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12570 
12571 	/* Enable PTP L4 if FW > v6.0 */
12572 	if (pf->hw.mac.type == I40E_MAC_XL710 &&
12573 	    pf->hw.aq.fw_maj_ver >= 6)
12574 		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12575 
12576 	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12577 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12578 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12579 		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12580 	}
12581 
12582 	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12583 		pf->flags |= I40E_FLAG_IWARP_ENABLED;
12584 		/* IWARP needs one extra vector for CQP just like MISC. */
12585 		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12586 	}
12587 	/* Stopping FW LLDP engine is supported on XL710 and X722
12588 	 * starting from FW versions determined in i40e_init_adminq.
12589 	 * Stopping the FW LLDP engine is not supported on XL710
12590 	 * if NPAR is functioning so unset this hw flag in this case.
12591 	 */
12592 	if (pf->hw.mac.type == I40E_MAC_XL710 &&
12593 	    pf->hw.func_caps.npar_enable &&
12594 	    (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12595 		pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12596 
12597 #ifdef CONFIG_PCI_IOV
12598 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12599 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12600 		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12601 		pf->num_req_vfs = min_t(int,
12602 					pf->hw.func_caps.num_vfs,
12603 					I40E_MAX_VF_COUNT);
12604 	}
12605 #endif /* CONFIG_PCI_IOV */
12606 	pf->eeprom_version = 0xDEAD;
12607 	pf->lan_veb = I40E_NO_VEB;
12608 	pf->lan_vsi = I40E_NO_VSI;
12609 
12610 	/* By default FW has this off for performance reasons */
12611 	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12612 
12613 	/* set up queue assignment tracking */
12614 	size = sizeof(struct i40e_lump_tracking)
12615 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12616 	pf->qp_pile = kzalloc(size, GFP_KERNEL);
12617 	if (!pf->qp_pile) {
12618 		err = -ENOMEM;
12619 		goto sw_init_done;
12620 	}
12621 	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12622 
12623 	pf->tx_timeout_recovery_level = 1;
12624 
12625 	if (pf->hw.mac.type != I40E_MAC_X722 &&
12626 	    i40e_is_total_port_shutdown_enabled(pf)) {
12627 		/* Link down on close must be on when total port shutdown
12628 		 * is enabled for a given port
12629 		 */
12630 		pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12631 			      I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12632 		dev_info(&pf->pdev->dev,
12633 			 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12634 	}
12635 	mutex_init(&pf->switch_mutex);
12636 
12637 sw_init_done:
12638 	return err;
12639 }
12640 
12641 /**
12642  * i40e_set_ntuple - set the ntuple feature flag and take action
12643  * @pf: board private structure to initialize
12644  * @features: the feature set that the stack is suggesting
12645  *
12646  * returns a bool to indicate if reset needs to happen
12647  **/
12648 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12649 {
12650 	bool need_reset = false;
12651 
12652 	/* Check if Flow Director n-tuple support was enabled or disabled.  If
12653 	 * the state changed, we need to reset.
12654 	 */
12655 	if (features & NETIF_F_NTUPLE) {
12656 		/* Enable filters and mark for reset */
12657 		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12658 			need_reset = true;
12659 		/* enable FD_SB only if there is MSI-X vector and no cloud
12660 		 * filters exist
12661 		 */
12662 		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12663 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12664 			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12665 		}
12666 	} else {
12667 		/* turn off filters, mark for reset and clear SW filter list */
12668 		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12669 			need_reset = true;
12670 			i40e_fdir_filter_exit(pf);
12671 		}
12672 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12673 		clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12674 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12675 
12676 		/* reset fd counters */
12677 		pf->fd_add_err = 0;
12678 		pf->fd_atr_cnt = 0;
12679 		/* if ATR was auto disabled it can be re-enabled. */
12680 		if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12681 			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12682 			    (I40E_DEBUG_FD & pf->hw.debug_mask))
12683 				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12684 	}
12685 	return need_reset;
12686 }
12687 
12688 /**
12689  * i40e_clear_rss_lut - clear the rx hash lookup table
12690  * @vsi: the VSI being configured
12691  **/
12692 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12693 {
12694 	struct i40e_pf *pf = vsi->back;
12695 	struct i40e_hw *hw = &pf->hw;
12696 	u16 vf_id = vsi->vf_id;
12697 	u8 i;
12698 
12699 	if (vsi->type == I40E_VSI_MAIN) {
12700 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12701 			wr32(hw, I40E_PFQF_HLUT(i), 0);
12702 	} else if (vsi->type == I40E_VSI_SRIOV) {
12703 		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12704 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12705 	} else {
12706 		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12707 	}
12708 }
12709 
12710 /**
12711  * i40e_set_features - set the netdev feature flags
12712  * @netdev: ptr to the netdev being adjusted
12713  * @features: the feature set that the stack is suggesting
12714  * Note: expects to be called while under rtnl_lock()
12715  **/
12716 static int i40e_set_features(struct net_device *netdev,
12717 			     netdev_features_t features)
12718 {
12719 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12720 	struct i40e_vsi *vsi = np->vsi;
12721 	struct i40e_pf *pf = vsi->back;
12722 	bool need_reset;
12723 
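	/* When RXHASH is being turned on, (re)program the RSS configuration;
	 * when it is being turned off, clear the LUT so the hardware stops
	 * spreading receive traffic.
	 */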
12724 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12725 		i40e_pf_config_rss(pf);
12726 	else if (!(features & NETIF_F_RXHASH) &&
12727 		 netdev->features & NETIF_F_RXHASH)
12728 		i40e_clear_rss_lut(vsi);
12729 
12730 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
12731 		i40e_vlan_stripping_enable(vsi);
12732 	else
12733 		i40e_vlan_stripping_disable(vsi);
12734 
12735 	if (!(features & NETIF_F_HW_TC) &&
12736 	    (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12737 		dev_err(&pf->pdev->dev,
12738 			"Offloaded tc filters active, can't turn hw_tc_offload off");
12739 		return -EINVAL;
12740 	}
12741 
12742 	if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12743 		i40e_del_all_macvlans(vsi);
12744 
12745 	need_reset = i40e_set_ntuple(pf, features);
12746 
12747 	if (need_reset)
12748 		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12749 
12750 	return 0;
12751 }
12752 
12753 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12754 				    unsigned int table, unsigned int idx,
12755 				    struct udp_tunnel_info *ti)
12756 {
12757 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12758 	struct i40e_hw *hw = &np->vsi->back->hw;
12759 	u8 type, filter_index;
12760 	i40e_status ret;
12761 
12762 	type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12763 						   I40E_AQC_TUNNEL_TYPE_NGE;
12764 
12765 	ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12766 				     NULL);
12767 	if (ret) {
12768 		netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12769 			    i40e_stat_str(hw, ret),
12770 			    i40e_aq_str(hw, hw->aq.asq_last_status));
12771 		return -EIO;
12772 	}
12773 
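	/* Remember the AQ filter index so the delete path can hand it back
	 * to the firmware via ti->hw_priv.
	 */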
12774 	udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
12775 	return 0;
12776 }
12777 
12778 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12779 				      unsigned int table, unsigned int idx,
12780 				      struct udp_tunnel_info *ti)
12781 {
12782 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12783 	struct i40e_hw *hw = &np->vsi->back->hw;
12784 	i40e_status ret;
12785 
12786 	ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12787 	if (ret) {
12788 		netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12789 			    i40e_stat_str(hw, ret),
12790 			    i40e_aq_str(hw, hw->aq.asq_last_status));
12791 		return -EIO;
12792 	}
12793 
12794 	return 0;
12795 }
12796 
12797 static int i40e_get_phys_port_id(struct net_device *netdev,
12798 				 struct netdev_phys_item_id *ppid)
12799 {
12800 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12801 	struct i40e_pf *pf = np->vsi->back;
12802 	struct i40e_hw *hw = &pf->hw;
12803 
12804 	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12805 		return -EOPNOTSUPP;
12806 
12807 	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12808 	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12809 
12810 	return 0;
12811 }
12812 
12813 /**
12814  * i40e_ndo_fdb_add - add an entry to the hardware database
12815  * @ndm: the input from the stack
12816  * @tb: pointer to array of nladdr (unused)
12817  * @dev: the net device pointer
12818  * @addr: the MAC address entry being added
12819  * @vid: VLAN ID
12820  * @flags: instructions from stack about fdb operation
12821  * @extack: netlink extended ack, unused currently
12822  */
12823 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12824 			    struct net_device *dev,
12825 			    const unsigned char *addr, u16 vid,
12826 			    u16 flags,
12827 			    struct netlink_ext_ack *extack)
12828 {
12829 	struct i40e_netdev_priv *np = netdev_priv(dev);
12830 	struct i40e_pf *pf = np->vsi->back;
12831 	int err = 0;
12832 
12833 	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12834 		return -EOPNOTSUPP;
12835 
12836 	if (vid) {
12837 		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12838 		return -EINVAL;
12839 	}
12840 
12841 	/* Hardware does not support aging addresses, so if an
12842 	 * ndm_state is given, only allow permanent addresses.
12843 	 */
12844 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12845 		netdev_info(dev, "FDB only supports static addresses\n");
12846 		return -EINVAL;
12847 	}
12848 
12849 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12850 		err = dev_uc_add_excl(dev, addr);
12851 	else if (is_multicast_ether_addr(addr))
12852 		err = dev_mc_add_excl(dev, addr);
12853 	else
12854 		err = -EINVAL;
12855 
12856 	/* Only return duplicate errors if NLM_F_EXCL is set */
12857 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
12858 		err = 0;
12859 
12860 	return err;
12861 }
12862 
12863 /**
12864  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12865  * @dev: the netdev being configured
12866  * @nlh: RTNL message
12867  * @flags: bridge flags
12868  * @extack: netlink extended ack
12869  *
12870  * Inserts a new hardware bridge if not already created and
12871  * enables the bridging mode requested (VEB or VEPA). If the
12872  * hardware bridge has already been inserted and the request
12873  * is to change the mode then that requires a PF reset to
12874  * allow rebuild of the components with required hardware
12875  * bridge mode enabled.
12876  *
12877  * Note: expects to be called while under rtnl_lock()
12878  **/
12879 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12880 				   struct nlmsghdr *nlh,
12881 				   u16 flags,
12882 				   struct netlink_ext_ack *extack)
12883 {
12884 	struct i40e_netdev_priv *np = netdev_priv(dev);
12885 	struct i40e_vsi *vsi = np->vsi;
12886 	struct i40e_pf *pf = vsi->back;
12887 	struct i40e_veb *veb = NULL;
12888 	struct nlattr *attr, *br_spec;
12889 	int i, rem;
12890 
12891 	/* Only for PF VSI for now */
12892 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12893 		return -EOPNOTSUPP;
12894 
12895 	/* Find the HW bridge for PF VSI */
12896 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12897 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12898 			veb = pf->veb[i];
12899 	}
12900 
12901 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;
12902 
12903 	nla_for_each_nested(attr, br_spec, rem) {
12904 		__u16 mode;
12905 
12906 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
12907 			continue;
12908 
12909 		mode = nla_get_u16(attr);
12910 		if ((mode != BRIDGE_MODE_VEPA) &&
12911 		    (mode != BRIDGE_MODE_VEB))
12912 			return -EINVAL;
12913 
12914 		/* Insert a new HW bridge */
12915 		if (!veb) {
12916 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12917 					     vsi->tc_config.enabled_tc);
12918 			if (veb) {
12919 				veb->bridge_mode = mode;
12920 				i40e_config_bridge_mode(veb);
12921 			} else {
12922 				/* No Bridge HW offload available */
12923 				return -ENOENT;
12924 			}
12925 			break;
12926 		} else if (mode != veb->bridge_mode) {
12927 			/* Existing HW bridge but different mode needs reset */
12928 			veb->bridge_mode = mode;
12929 			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
12930 			if (mode == BRIDGE_MODE_VEB)
12931 				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12932 			else
12933 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12934 			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12935 			break;
12936 		}
12937 	}
12938 
12939 	return 0;
12940 }
12941 
12942 /**
12943  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12944  * @skb: skb buff
12945  * @pid: process id
12946  * @seq: RTNL message seq #
12947  * @dev: the netdev being configured
12948  * @filter_mask: unused
12949  * @nlflags: netlink flags passed in
12950  *
12951  * Return the mode in which the hardware bridge is operating,
12952  * i.e. VEB or VEPA.
12953  **/
12954 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12955 				   struct net_device *dev,
12956 				   u32 __always_unused filter_mask,
12957 				   int nlflags)
12958 {
12959 	struct i40e_netdev_priv *np = netdev_priv(dev);
12960 	struct i40e_vsi *vsi = np->vsi;
12961 	struct i40e_pf *pf = vsi->back;
12962 	struct i40e_veb *veb = NULL;
12963 	int i;
12964 
12965 	/* Only for PF VSI for now */
12966 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12967 		return -EOPNOTSUPP;
12968 
12969 	/* Find the HW bridge for the PF VSI */
12970 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12971 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12972 			veb = pf->veb[i];
12973 	}
12974 
12975 	if (!veb)
12976 		return 0;
12977 
12978 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12979 				       0, 0, nlflags, filter_mask, NULL);
12980 }
12981 
12982 /**
12983  * i40e_features_check - Validate encapsulated packet conforms to limits
12984  * @skb: skb buff
12985  * @dev: This physical port's netdev
12986  * @features: Offload features that the stack believes apply
12987  **/
12988 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12989 					     struct net_device *dev,
12990 					     netdev_features_t features)
12991 {
12992 	size_t len;
12993 
12994 	/* No point in doing any of this if neither checksum nor GSO are
12995 	 * being requested for this frame.  We can rule out both by just
12996 	 * checking for CHECKSUM_PARTIAL
12997 	 */
12998 	if (skb->ip_summed != CHECKSUM_PARTIAL)
12999 		return features;
13000 
13001 	/* We cannot support GSO if the MSS is going to be less than
13002 	 * 64 bytes.  If it is then we need to drop support for GSO.
13003 	 */
13004 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
13005 		features &= ~NETIF_F_GSO_MASK;
13006 
13007 	/* MACLEN can support at most 63 words: the L2 header length is
	 * carried in 2-byte words, so it must be even and no longer than
	 * 126 bytes.
	 */
13008 	len = skb_network_header(skb) - skb->data;
13009 	if (len & ~(63 * 2))
13010 		goto out_err;
13011 
13012 	/* IPLEN and EIPLEN can support at most 127 dwords */
13013 	len = skb_transport_header(skb) - skb_network_header(skb);
13014 	if (len & ~(127 * 4))
13015 		goto out_err;
13016 
13017 	if (skb->encapsulation) {
13018 		/* L4TUNLEN can support 127 words */
13019 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
13020 		if (len & ~(127 * 2))
13021 			goto out_err;
13022 
13023 		/* IPLEN can support at most 127 dwords */
13024 		len = skb_inner_transport_header(skb) -
13025 		      skb_inner_network_header(skb);
13026 		if (len & ~(127 * 4))
13027 			goto out_err;
13028 	}
13029 
13030 	/* No need to validate L4LEN as TCP is the only protocol with a
13031 	 * flexible value and we support all possible values supported
13032 	 * by TCP, which is at most 15 dwords
13033 	 */
13034 
13035 	return features;
13036 out_err:
13037 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13038 }
13039 
13040 /**
13041  * i40e_xdp_setup - add/remove an XDP program
13042  * @vsi: VSI to be changed
13043  * @prog: XDP program
13044  * @extack: netlink extended ack
13045  **/
13046 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13047 			  struct netlink_ext_ack *extack)
13048 {
13049 	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
13050 	struct i40e_pf *pf = vsi->back;
13051 	struct bpf_prog *old_prog;
13052 	bool need_reset;
13053 	int i;
13054 
13055 	/* Don't allow frames that span over multiple buffers */
13056 	if (frame_size > vsi->rx_buf_len) {
13057 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
13058 		return -EINVAL;
13059 	}
13060 
13061 	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
13062 	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
13063 
13064 	if (need_reset)
13065 		i40e_prep_for_reset(pf);
13066 
13067 	/* VSI shall be deleted in a moment, just return EINVAL */
13068 	if (test_bit(__I40E_IN_REMOVE, pf->state))
13069 		return -EINVAL;
13070 
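	/* Atomically swap in the new program so concurrent readers see either
	 * the old or the new pointer, never a torn value.
	 */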
13071 	old_prog = xchg(&vsi->xdp_prog, prog);
13072 
13073 	if (need_reset) {
13074 		if (!prog)
13075 			/* Wait until ndo_xsk_wakeup completes. */
13076 			synchronize_rcu();
13077 		i40e_reset_and_rebuild(pf, true, true);
13078 	}
13079 
13080 	for (i = 0; i < vsi->num_queue_pairs; i++)
13081 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
13082 
13083 	if (old_prog)
13084 		bpf_prog_put(old_prog);
13085 
13086 	/* Kick start the NAPI context if there is an AF_XDP socket open
13087 	 * on that queue id, so that receiving will start.
13088 	 */
13089 	if (need_reset && prog)
13090 		for (i = 0; i < vsi->num_queue_pairs; i++)
13091 			if (vsi->xdp_rings[i]->xsk_pool)
13092 				(void)i40e_xsk_wakeup(vsi->netdev, i,
13093 						      XDP_WAKEUP_RX);
13094 
13095 	return 0;
13096 }
13097 
13098 /**
13099  * i40e_enter_busy_conf - Enters busy config state
13100  * @vsi: vsi
13101  *
13102  * Returns 0 on success, <0 for failure.
13103  **/
13104 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
13105 {
13106 	struct i40e_pf *pf = vsi->back;
13107 	int timeout = 50;
13108 
13109 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13110 		timeout--;
13111 		if (!timeout)
13112 			return -EBUSY;
13113 		usleep_range(1000, 2000);
13114 	}
13115 
13116 	return 0;
13117 }
13118 
13119 /**
13120  * i40e_exit_busy_conf - Exits busy config state
13121  * @vsi: vsi
13122  **/
13123 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13124 {
13125 	struct i40e_pf *pf = vsi->back;
13126 
13127 	clear_bit(__I40E_CONFIG_BUSY, pf->state);
13128 }
13129 
13130 /**
13131  * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13132  * @vsi: vsi
13133  * @queue_pair: queue pair
13134  **/
13135 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13136 {
13137 	memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13138 	       sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13139 	memset(&vsi->tx_rings[queue_pair]->stats, 0,
13140 	       sizeof(vsi->tx_rings[queue_pair]->stats));
13141 	if (i40e_enabled_xdp_vsi(vsi)) {
13142 		memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13143 		       sizeof(vsi->xdp_rings[queue_pair]->stats));
13144 	}
13145 }
13146 
13147 /**
13148  * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13149  * @vsi: vsi
13150  * @queue_pair: queue pair
13151  **/
13152 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13153 {
13154 	i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13155 	if (i40e_enabled_xdp_vsi(vsi)) {
13156 		/* Make sure that in-progress ndo_xdp_xmit calls are
13157 		 * completed.
13158 		 */
13159 		synchronize_rcu();
13160 		i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13161 	}
13162 	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13163 }
13164 
13165 /**
13166  * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13167  * @vsi: vsi
13168  * @queue_pair: queue pair
13169  * @enable: true for enable, false for disable
13170  **/
13171 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13172 					bool enable)
13173 {
13174 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13175 	struct i40e_q_vector *q_vector = rxr->q_vector;
13176 
13177 	if (!vsi->netdev)
13178 		return;
13179 
13180 	/* All rings in a qp belong to the same qvector. */
13181 	if (q_vector->rx.ring || q_vector->tx.ring) {
13182 		if (enable)
13183 			napi_enable(&q_vector->napi);
13184 		else
13185 			napi_disable(&q_vector->napi);
13186 	}
13187 }
13188 
13189 /**
13190  * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13191  * @vsi: vsi
13192  * @queue_pair: queue pair
13193  * @enable: true for enable, false for disable
13194  *
13195  * Returns 0 on success, <0 on failure.
13196  **/
13197 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13198 					bool enable)
13199 {
13200 	struct i40e_pf *pf = vsi->back;
13201 	int pf_q, ret = 0;
13202 
13203 	pf_q = vsi->base_queue + queue_pair;
13204 	ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13205 				     false /*is xdp*/, enable);
13206 	if (ret) {
13207 		dev_info(&pf->pdev->dev,
13208 			 "VSI seid %d Tx ring %d %sable timeout\n",
13209 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13210 		return ret;
13211 	}
13212 
13213 	i40e_control_rx_q(pf, pf_q, enable);
13214 	ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13215 	if (ret) {
13216 		dev_info(&pf->pdev->dev,
13217 			 "VSI seid %d Rx ring %d %sable timeout\n",
13218 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13219 		return ret;
13220 	}
13221 
13222 	/* Due to HW errata, on Rx disable only, the register can
13223 	 * indicate done before it really is. Needs 50ms to be sure
13224 	 */
13225 	if (!enable)
13226 		mdelay(50);
13227 
13228 	if (!i40e_enabled_xdp_vsi(vsi))
13229 		return ret;
13230 
13231 	ret = i40e_control_wait_tx_q(vsi->seid, pf,
13232 				     pf_q + vsi->alloc_queue_pairs,
13233 				     true /*is xdp*/, enable);
13234 	if (ret) {
13235 		dev_info(&pf->pdev->dev,
13236 			 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13237 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13238 	}
13239 
13240 	return ret;
13241 }
13242 
13243 /**
13244  * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13245  * @vsi: vsi
13246  * @queue_pair: queue_pair
13247  **/
13248 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13249 {
13250 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13251 	struct i40e_pf *pf = vsi->back;
13252 	struct i40e_hw *hw = &pf->hw;
13253 
13254 	/* All rings in a qp belong to the same qvector. */
13255 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13256 		i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13257 	else
13258 		i40e_irq_dynamic_enable_icr0(pf);
13259 
13260 	i40e_flush(hw);
13261 }
13262 
13263 /**
13264  * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13265  * @vsi: vsi
13266  * @queue_pair: queue_pair
13267  **/
13268 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13269 {
13270 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13271 	struct i40e_pf *pf = vsi->back;
13272 	struct i40e_hw *hw = &pf->hw;
13273 
13274 	/* For simplicity, instead of removing the qp interrupt causes
13275 	 * from the interrupt linked list, we simply disable the interrupt, and
13276 	 * leave the list intact.
13277 	 *
13278 	 * All rings in a qp belong to the same qvector.
13279 	 */
13280 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13281 		u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13282 
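		/* Vector 0 (the misc/other-cause vector) uses DYN_CTL0, so
		 * queue vector N is controlled by DYN_CTLN(N - 1).
		 */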
13283 		wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13284 		i40e_flush(hw);
13285 		synchronize_irq(pf->msix_entries[intpf].vector);
13286 	} else {
13287 		/* Legacy and MSI mode - this stops all interrupt handling */
13288 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13289 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13290 		i40e_flush(hw);
13291 		synchronize_irq(pf->pdev->irq);
13292 	}
13293 }
13294 
13295 /**
13296  * i40e_queue_pair_disable - Disables a queue pair
13297  * @vsi: vsi
13298  * @queue_pair: queue pair
13299  *
13300  * Returns 0 on success, <0 on failure.
13301  **/
13302 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13303 {
13304 	int err;
13305 
13306 	err = i40e_enter_busy_conf(vsi);
13307 	if (err)
13308 		return err;
13309 
13310 	i40e_queue_pair_disable_irq(vsi, queue_pair);
13311 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13312 	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13313 	i40e_queue_pair_clean_rings(vsi, queue_pair);
13314 	i40e_queue_pair_reset_stats(vsi, queue_pair);
13315 
13316 	return err;
13317 }
13318 
13319 /**
13320  * i40e_queue_pair_enable - Enables a queue pair
13321  * @vsi: vsi
13322  * @queue_pair: queue pair
13323  *
13324  * Returns 0 on success, <0 on failure.
13325  **/
13326 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13327 {
13328 	int err;
13329 
13330 	err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13331 	if (err)
13332 		return err;
13333 
13334 	if (i40e_enabled_xdp_vsi(vsi)) {
13335 		err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13336 		if (err)
13337 			return err;
13338 	}
13339 
13340 	err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13341 	if (err)
13342 		return err;
13343 
13344 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13345 	i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13346 	i40e_queue_pair_enable_irq(vsi, queue_pair);
13347 
13348 	i40e_exit_busy_conf(vsi);
13349 
13350 	return err;
13351 }
13352 
13353 /**
13354  * i40e_xdp - implements ndo_bpf for i40e
13355  * @dev: netdevice
13356  * @xdp: XDP command
13357  **/
13358 static int i40e_xdp(struct net_device *dev,
13359 		    struct netdev_bpf *xdp)
13360 {
13361 	struct i40e_netdev_priv *np = netdev_priv(dev);
13362 	struct i40e_vsi *vsi = np->vsi;
13363 
13364 	if (vsi->type != I40E_VSI_MAIN)
13365 		return -EINVAL;
13366 
13367 	switch (xdp->command) {
13368 	case XDP_SETUP_PROG:
13369 		return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13370 	case XDP_SETUP_XSK_POOL:
13371 		return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13372 					   xdp->xsk.queue_id);
13373 	default:
13374 		return -EINVAL;
13375 	}
13376 }
13377 
13378 static const struct net_device_ops i40e_netdev_ops = {
13379 	.ndo_open		= i40e_open,
13380 	.ndo_stop		= i40e_close,
13381 	.ndo_start_xmit		= i40e_lan_xmit_frame,
13382 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
13383 	.ndo_set_rx_mode	= i40e_set_rx_mode,
13384 	.ndo_validate_addr	= eth_validate_addr,
13385 	.ndo_set_mac_address	= i40e_set_mac,
13386 	.ndo_change_mtu		= i40e_change_mtu,
13387 	.ndo_eth_ioctl		= i40e_ioctl,
13388 	.ndo_tx_timeout		= i40e_tx_timeout,
13389 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
13390 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
13391 #ifdef CONFIG_NET_POLL_CONTROLLER
13392 	.ndo_poll_controller	= i40e_netpoll,
13393 #endif
13394 	.ndo_setup_tc		= __i40e_setup_tc,
13395 	.ndo_select_queue	= i40e_lan_select_queue,
13396 	.ndo_set_features	= i40e_set_features,
13397 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
13398 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
13399 	.ndo_get_vf_stats	= i40e_get_vf_stats,
13400 	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
13401 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
13402 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
13403 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
13404 	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
13405 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
13406 	.ndo_fdb_add		= i40e_ndo_fdb_add,
13407 	.ndo_features_check	= i40e_features_check,
13408 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
13409 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
13410 	.ndo_bpf		= i40e_xdp,
13411 	.ndo_xdp_xmit		= i40e_xdp_xmit,
13412 	.ndo_xsk_wakeup	        = i40e_xsk_wakeup,
13413 	.ndo_dfwd_add_station	= i40e_fwd_add,
13414 	.ndo_dfwd_del_station	= i40e_fwd_del,
13415 };
13416 
13417 /**
13418  * i40e_config_netdev - Setup the netdev flags
13419  * @vsi: the VSI being configured
13420  *
13421  * Returns 0 on success, negative value on failure
13422  **/
13423 static int i40e_config_netdev(struct i40e_vsi *vsi)
13424 {
13425 	struct i40e_pf *pf = vsi->back;
13426 	struct i40e_hw *hw = &pf->hw;
13427 	struct i40e_netdev_priv *np;
13428 	struct net_device *netdev;
13429 	u8 broadcast[ETH_ALEN];
13430 	u8 mac_addr[ETH_ALEN];
13431 	int etherdev_size;
13432 	netdev_features_t hw_enc_features;
13433 	netdev_features_t hw_features;
13434 
13435 	etherdev_size = sizeof(struct i40e_netdev_priv);
13436 	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13437 	if (!netdev)
13438 		return -ENOMEM;
13439 
13440 	vsi->netdev = netdev;
13441 	np = netdev_priv(netdev);
13442 	np->vsi = vsi;
13443 
13444 	hw_enc_features = NETIF_F_SG			|
13445 			  NETIF_F_HW_CSUM		|
13446 			  NETIF_F_HIGHDMA		|
13447 			  NETIF_F_SOFT_FEATURES		|
13448 			  NETIF_F_TSO			|
13449 			  NETIF_F_TSO_ECN		|
13450 			  NETIF_F_TSO6			|
13451 			  NETIF_F_GSO_GRE		|
13452 			  NETIF_F_GSO_GRE_CSUM		|
13453 			  NETIF_F_GSO_PARTIAL		|
13454 			  NETIF_F_GSO_IPXIP4		|
13455 			  NETIF_F_GSO_IPXIP6		|
13456 			  NETIF_F_GSO_UDP_TUNNEL	|
13457 			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
13458 			  NETIF_F_GSO_UDP_L4		|
13459 			  NETIF_F_SCTP_CRC		|
13460 			  NETIF_F_RXHASH		|
13461 			  NETIF_F_RXCSUM		|
13462 			  0;
13463 
13464 	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13465 		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13466 
13467 	netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13468 
13469 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13470 
13471 	netdev->hw_enc_features |= hw_enc_features;
13472 
13473 	/* record features VLANs can make use of */
13474 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13475 
13476 #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE |		\
13477 				   NETIF_F_GSO_GRE_CSUM |	\
13478 				   NETIF_F_GSO_IPXIP4 |		\
13479 				   NETIF_F_GSO_IPXIP6 |		\
13480 				   NETIF_F_GSO_UDP_TUNNEL |	\
13481 				   NETIF_F_GSO_UDP_TUNNEL_CSUM)
13482 
13483 	netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
13484 	netdev->features |= NETIF_F_GSO_PARTIAL |
13485 			    I40E_GSO_PARTIAL_FEATURES;
13486 
13487 	netdev->mpls_features |= NETIF_F_SG;
13488 	netdev->mpls_features |= NETIF_F_HW_CSUM;
13489 	netdev->mpls_features |= NETIF_F_TSO;
13490 	netdev->mpls_features |= NETIF_F_TSO6;
13491 	netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
13492 
13493 	/* enable macvlan offloads */
13494 	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13495 
13496 	hw_features = hw_enc_features		|
13497 		      NETIF_F_HW_VLAN_CTAG_TX	|
13498 		      NETIF_F_HW_VLAN_CTAG_RX;
13499 
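	/* n-tuple filters and TC offload are only exposed when the port is
	 * not partitioned (non-MFP); hw-tc-offload is advertised but left
	 * disabled by default further below.
	 */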
13500 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13501 		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13502 
13503 	netdev->hw_features |= hw_features;
13504 
13505 	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13506 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13507 
13508 	netdev->features &= ~NETIF_F_HW_TC;
13509 
13510 	if (vsi->type == I40E_VSI_MAIN) {
13511 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13512 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
13513 		/* The following steps are necessary for two reasons. First,
13514 		 * some older NVM configurations load a default MAC-VLAN
13515 		 * filter that will accept any tagged packet, and we want to
13516 		 * replace this with a normal filter. Additionally, it is
13517 		 * possible our MAC address was provided by the platform using
13518 		 * Open Firmware or similar.
13519 		 *
13520 		 * Thus, we need to remove the default filter and install one
13521 		 * specific to the MAC address.
13522 		 */
13523 		i40e_rm_default_mac_filter(vsi, mac_addr);
13524 		spin_lock_bh(&vsi->mac_filter_hash_lock);
13525 		i40e_add_mac_filter(vsi, mac_addr);
13526 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
13527 	} else {
13528 		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13529 		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13530 		 * the end, which is 4 bytes long, so force truncation of the
13531 		 * original name by IFNAMSIZ - 4
13532 		 */
13533 		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13534 			 IFNAMSIZ - 4,
13535 			 pf->vsi[pf->lan_vsi]->netdev->name);
13536 		eth_random_addr(mac_addr);
13537 
13538 		spin_lock_bh(&vsi->mac_filter_hash_lock);
13539 		i40e_add_mac_filter(vsi, mac_addr);
13540 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
13541 	}
13542 
13543 	/* Add the broadcast filter so that we initially will receive
13544 	 * broadcast packets. Note that when a new VLAN is first added the
13545 	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13546 	 * specific filters as part of transitioning into "vlan" operation.
13547 	 * When more VLANs are added, the driver will copy each existing MAC
13548 	 * filter and add it for the new VLAN.
13549 	 *
13550 	 * Broadcast filters are handled specially by
13551 	 * i40e_sync_filters_subtask, as the driver must set the broadcast
13552 	 * promiscuous bit instead of adding this directly as a MAC/VLAN
13553 	 * filter. The subtask will update the correct broadcast promiscuous
13554 	 * bits as VLANs become active or inactive.
13555 	 */
13556 	eth_broadcast_addr(broadcast);
13557 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13558 	i40e_add_mac_filter(vsi, broadcast);
13559 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13560 
13561 	eth_hw_addr_set(netdev, mac_addr);
13562 	ether_addr_copy(netdev->perm_addr, mac_addr);
13563 
13564 	/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13565 	netdev->neigh_priv_len = sizeof(u32) * 4;
13566 
13567 	netdev->priv_flags |= IFF_UNICAST_FLT;
13568 	netdev->priv_flags |= IFF_SUPP_NOFCS;
13569 	/* Setup netdev TC information */
13570 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13571 
13572 	netdev->netdev_ops = &i40e_netdev_ops;
13573 	netdev->watchdog_timeo = 5 * HZ;
13574 	i40e_set_ethtool_ops(netdev);
13575 
13576 	/* MTU range: 68 - 9706 */
13577 	netdev->min_mtu = ETH_MIN_MTU;
13578 	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13579 
13580 	return 0;
13581 }
13582 
13583 /**
13584  * i40e_vsi_delete - Delete a VSI from the switch
13585  * @vsi: the VSI being removed
13588  **/
13589 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13590 {
13591 	/* remove default VSI is not allowed */
13592 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13593 		return;
13594 
13595 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13596 }
13597 
13598 /**
13599  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13600  * @vsi: the VSI being queried
13601  *
13602  * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
13603  **/
13604 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13605 {
13606 	struct i40e_veb *veb;
13607 	struct i40e_pf *pf = vsi->back;
13608 
13609 	/* Uplink is not a bridge so default to VEB */
13610 	if (vsi->veb_idx >= I40E_MAX_VEB)
13611 		return 1;
13612 
13613 	veb = pf->veb[vsi->veb_idx];
13614 	if (!veb) {
13615 		dev_info(&pf->pdev->dev,
13616 			 "There is no veb associated with the bridge\n");
13617 		return -ENOENT;
13618 	}
13619 
13620 	/* Uplink is a bridge in VEPA mode */
13621 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
13622 		return 0;
13623 
13624 	/* Uplink is a bridge in VEB mode */
13625 	return 1;
13630 }
13631 
13632 /**
13633  * i40e_add_vsi - Add a VSI to the switch
13634  * @vsi: the VSI being configured
13635  *
13636  * This initializes a VSI context depending on the VSI type to be added and
13637  * passes it down to the add_vsi aq command.
13638  **/
13639 static int i40e_add_vsi(struct i40e_vsi *vsi)
13640 {
13641 	int ret = -ENODEV;
13642 	struct i40e_pf *pf = vsi->back;
13643 	struct i40e_hw *hw = &pf->hw;
13644 	struct i40e_vsi_context ctxt;
13645 	struct i40e_mac_filter *f;
13646 	struct hlist_node *h;
13647 	int bkt;
13648 
13649 	u8 enabled_tc = 0x1; /* TC0 enabled */
13650 	int f_count = 0;
13651 
13652 	memset(&ctxt, 0, sizeof(ctxt));
13653 	switch (vsi->type) {
13654 	case I40E_VSI_MAIN:
13655 		/* The PF's main VSI is already setup as part of the
13656 		 * device initialization, so we'll not bother with
13657 		 * the add_vsi call, but we will retrieve the current
13658 		 * VSI context.
13659 		 */
13660 		ctxt.seid = pf->main_vsi_seid;
13661 		ctxt.pf_num = pf->hw.pf_id;
13662 		ctxt.vf_num = 0;
13663 		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13664 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13665 		if (ret) {
13666 			dev_info(&pf->pdev->dev,
13667 				 "couldn't get PF vsi config, err %s aq_err %s\n",
13668 				 i40e_stat_str(&pf->hw, ret),
13669 				 i40e_aq_str(&pf->hw,
13670 					     pf->hw.aq.asq_last_status));
13671 			return -ENOENT;
13672 		}
13673 		vsi->info = ctxt.info;
13674 		vsi->info.valid_sections = 0;
13675 
13676 		vsi->seid = ctxt.seid;
13677 		vsi->id = ctxt.vsi_number;
13678 
13679 		enabled_tc = i40e_pf_get_tc_map(pf);
13680 
13681 		/* Source pruning is enabled by default, so the flag is
13682 		 * negative logic - if it's set, we need to fiddle with
13683 		 * the VSI to disable source pruning.
13684 		 */
13685 		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13686 			memset(&ctxt, 0, sizeof(ctxt));
13687 			ctxt.seid = pf->main_vsi_seid;
13688 			ctxt.pf_num = pf->hw.pf_id;
13689 			ctxt.vf_num = 0;
13690 			ctxt.info.valid_sections |=
13691 				     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13692 			ctxt.info.switch_id =
13693 				   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13694 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13695 			if (ret) {
13696 				dev_info(&pf->pdev->dev,
13697 					 "update vsi failed, err %s aq_err %s\n",
13698 					 i40e_stat_str(&pf->hw, ret),
13699 					 i40e_aq_str(&pf->hw,
13700 						     pf->hw.aq.asq_last_status));
13701 				ret = -ENOENT;
13702 				goto err;
13703 			}
13704 		}
13705 
13706 		/* MFP mode setup queue map and update VSI */
13707 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13708 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13709 			memset(&ctxt, 0, sizeof(ctxt));
13710 			ctxt.seid = pf->main_vsi_seid;
13711 			ctxt.pf_num = pf->hw.pf_id;
13712 			ctxt.vf_num = 0;
13713 			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13714 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13715 			if (ret) {
13716 				dev_info(&pf->pdev->dev,
13717 					 "update vsi failed, err %s aq_err %s\n",
13718 					 i40e_stat_str(&pf->hw, ret),
13719 					 i40e_aq_str(&pf->hw,
13720 						    pf->hw.aq.asq_last_status));
13721 				ret = -ENOENT;
13722 				goto err;
13723 			}
13724 			/* update the local VSI info queue map */
13725 			i40e_vsi_update_queue_map(vsi, &ctxt);
13726 			vsi->info.valid_sections = 0;
13727 		} else {
			/* Default/Main VSI is only enabled for TC0;
13729 			 * reconfigure it to enable all TCs that are
13730 			 * available on the port in SFP mode.
13731 			 * For MFP case the iSCSI PF would use this
13732 			 * flow to enable LAN+iSCSI TC.
13733 			 */
13734 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
13735 			if (ret) {
13736 				/* Single TC condition is not fatal,
				 * so log a message and continue
13738 				 */
13739 				dev_info(&pf->pdev->dev,
13740 					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13741 					 enabled_tc,
13742 					 i40e_stat_str(&pf->hw, ret),
13743 					 i40e_aq_str(&pf->hw,
13744 						    pf->hw.aq.asq_last_status));
13745 			}
13746 		}
13747 		break;
13748 
13749 	case I40E_VSI_FDIR:
13750 		ctxt.pf_num = hw->pf_id;
13751 		ctxt.vf_num = 0;
13752 		ctxt.uplink_seid = vsi->uplink_seid;
13753 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13754 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13755 		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13756 		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
13757 			ctxt.info.valid_sections |=
13758 			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13759 			ctxt.info.switch_id =
13760 			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13761 		}
13762 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13763 		break;
13764 
13765 	case I40E_VSI_VMDQ2:
13766 		ctxt.pf_num = hw->pf_id;
13767 		ctxt.vf_num = 0;
13768 		ctxt.uplink_seid = vsi->uplink_seid;
13769 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13770 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13771 
13772 		/* This VSI is connected to VEB so the switch_id
13773 		 * should be set to zero by default.
13774 		 */
13775 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13776 			ctxt.info.valid_sections |=
13777 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13778 			ctxt.info.switch_id =
13779 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13780 		}
13781 
13782 		/* Setup the VSI tx/rx queue map for TC0 only for now */
13783 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13784 		break;
13785 
13786 	case I40E_VSI_SRIOV:
13787 		ctxt.pf_num = hw->pf_id;
13788 		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13789 		ctxt.uplink_seid = vsi->uplink_seid;
13790 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13791 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13792 
13793 		/* This VSI is connected to VEB so the switch_id
13794 		 * should be set to zero by default.
13795 		 */
13796 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13797 			ctxt.info.valid_sections |=
13798 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13799 			ctxt.info.switch_id =
13800 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13801 		}
13802 
13803 		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13804 			ctxt.info.valid_sections |=
13805 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13806 			ctxt.info.queueing_opt_flags |=
13807 				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13808 				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13809 		}
13810 
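		/* Until a port VLAN is configured, allow the VF to send and
		 * receive both tagged and untagged frames.
		 */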
13811 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13812 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13813 		if (pf->vf[vsi->vf_id].spoofchk) {
13814 			ctxt.info.valid_sections |=
13815 				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13816 			ctxt.info.sec_flags |=
13817 				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13818 				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13819 		}
13820 		/* Setup the VSI tx/rx queue map for TC0 only for now */
13821 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13822 		break;
13823 
13824 	case I40E_VSI_IWARP:
13825 		/* send down message to iWARP */
13826 		break;
13827 
13828 	default:
13829 		return -ENODEV;
13830 	}
13831 
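	/* For any VSI type other than MAIN, the context built above is sent
	 * to the firmware with the add VSI AQ command; the MAIN VSI already
	 * exists and was only updated in place.
	 */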
13832 	if (vsi->type != I40E_VSI_MAIN) {
13833 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13834 		if (ret) {
13835 			dev_info(&vsi->back->pdev->dev,
13836 				 "add vsi failed, err %s aq_err %s\n",
13837 				 i40e_stat_str(&pf->hw, ret),
13838 				 i40e_aq_str(&pf->hw,
13839 					     pf->hw.aq.asq_last_status));
13840 			ret = -ENOENT;
13841 			goto err;
13842 		}
13843 		vsi->info = ctxt.info;
13844 		vsi->info.valid_sections = 0;
13845 		vsi->seid = ctxt.seid;
13846 		vsi->id = ctxt.vsi_number;
13847 	}
13848 
13849 	vsi->active_filters = 0;
13850 	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13851 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13852 	/* If macvlan filters already exist, force them to get loaded */
13853 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13854 		f->state = I40E_FILTER_NEW;
13855 		f_count++;
13856 	}
13857 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13858 
13859 	if (f_count) {
13860 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13861 		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13862 	}
13863 
13864 	/* Update VSI BW information */
13865 	ret = i40e_vsi_get_bw_info(vsi);
13866 	if (ret) {
13867 		dev_info(&pf->pdev->dev,
13868 			 "couldn't get vsi bw info, err %s aq_err %s\n",
13869 			 i40e_stat_str(&pf->hw, ret),
13870 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13871 		/* VSI is already added so not tearing that up */
13872 		ret = 0;
13873 	}
13874 
13875 err:
13876 	return ret;
13877 }
13878 
13879 /**
13880  * i40e_vsi_release - Delete a VSI and free its resources
13881  * @vsi: the VSI being removed
13882  *
13883  * Returns 0 on success or < 0 on error
13884  **/
13885 int i40e_vsi_release(struct i40e_vsi *vsi)
13886 {
13887 	struct i40e_mac_filter *f;
13888 	struct hlist_node *h;
13889 	struct i40e_veb *veb = NULL;
13890 	struct i40e_pf *pf;
13891 	u16 uplink_seid;
13892 	int i, n, bkt;
13893 
13894 	pf = vsi->back;
13895 
13896 	/* release of a VEB-owner or last VSI is not allowed */
13897 	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13898 		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13899 			 vsi->seid, vsi->uplink_seid);
13900 		return -ENODEV;
13901 	}
13902 	if (vsi == pf->vsi[pf->lan_vsi] &&
13903 	    !test_bit(__I40E_DOWN, pf->state)) {
13904 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13905 		return -ENODEV;
13906 	}
13907 	set_bit(__I40E_VSI_RELEASING, vsi->state);
13908 	uplink_seid = vsi->uplink_seid;
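	/* SRIOV VSIs have no PF-owned netdev or interrupt vectors, so the
	 * netdev teardown and IRQ disable below apply only to other types.
	 */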
13909 	if (vsi->type != I40E_VSI_SRIOV) {
13910 		if (vsi->netdev_registered) {
13911 			vsi->netdev_registered = false;
13912 			if (vsi->netdev) {
13913 				/* results in a call to i40e_close() */
13914 				unregister_netdev(vsi->netdev);
13915 			}
13916 		} else {
13917 			i40e_vsi_close(vsi);
13918 		}
13919 		i40e_vsi_disable_irq(vsi);
13920 	}
13921 
13922 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13923 
13924 	/* clear the sync flag on all filters */
13925 	if (vsi->netdev) {
13926 		__dev_uc_unsync(vsi->netdev, NULL);
13927 		__dev_mc_unsync(vsi->netdev, NULL);
13928 	}
13929 
13930 	/* make sure any remaining filters are marked for deletion */
13931 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13932 		__i40e_del_filter(vsi, f);
13933 
13934 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13935 
13936 	i40e_sync_vsi_filters(vsi);
13937 
13938 	i40e_vsi_delete(vsi);
13939 	i40e_vsi_free_q_vectors(vsi);
13940 	if (vsi->netdev) {
13941 		free_netdev(vsi->netdev);
13942 		vsi->netdev = NULL;
13943 	}
13944 	i40e_vsi_clear_rings(vsi);
13945 	i40e_vsi_clear(vsi);
13946 
13947 	/* If this was the last thing on the VEB, except for the
13948 	 * controlling VSI, remove the VEB, which puts the controlling
13949 	 * VSI onto the next level down in the switch.
13950 	 *
13951 	 * Well, okay, there's one more exception here: don't remove
13952 	 * the orphan VEBs yet.  We'll wait for an explicit remove request
13953 	 * from up the network stack.
13954 	 */
13955 	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13956 		if (pf->vsi[i] &&
13957 		    pf->vsi[i]->uplink_seid == uplink_seid &&
13958 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13959 			n++;      /* count the VSIs */
13960 		}
13961 	}
13962 	for (i = 0; i < I40E_MAX_VEB; i++) {
13963 		if (!pf->veb[i])
13964 			continue;
13965 		if (pf->veb[i]->uplink_seid == uplink_seid)
13966 			n++;     /* count the VEBs */
13967 		if (pf->veb[i]->seid == uplink_seid)
13968 			veb = pf->veb[i];
13969 	}
13970 	if (n == 0 && veb && veb->uplink_seid != 0)
13971 		i40e_veb_release(veb);
13972 
13973 	return 0;
13974 }
13975 
13976 /**
13977  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13978  * @vsi: ptr to the VSI
13979  *
13980  * This should only be called after i40e_vsi_mem_alloc() which allocates the
13981  * corresponding SW VSI structure and initializes num_queue_pairs for the
13982  * newly allocated VSI.
13983  *
13984  * Returns 0 on success or negative on failure
13985  **/
13986 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13987 {
13988 	int ret = -ENOENT;
13989 	struct i40e_pf *pf = vsi->back;
13990 
13991 	if (vsi->q_vectors[0]) {
13992 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13993 			 vsi->seid);
13994 		return -EEXIST;
13995 	}
13996 
13997 	if (vsi->base_vector) {
13998 		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13999 			 vsi->seid, vsi->base_vector);
14000 		return -EEXIST;
14001 	}
14002 
14003 	ret = i40e_vsi_alloc_q_vectors(vsi);
14004 	if (ret) {
14005 		dev_info(&pf->pdev->dev,
14006 			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
14007 			 vsi->num_q_vectors, vsi->seid, ret);
14008 		vsi->num_q_vectors = 0;
14009 		goto vector_setup_out;
14010 	}
14011 
14012 	/* In Legacy mode, we do not have to get any other vector since we
14013 	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
14015 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
14016 		return ret;
14017 	if (vsi->num_q_vectors)
14018 		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
14019 						 vsi->num_q_vectors, vsi->idx);
14020 	if (vsi->base_vector < 0) {
14021 		dev_info(&pf->pdev->dev,
14022 			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14023 			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14024 		i40e_vsi_free_q_vectors(vsi);
14025 		ret = -ENOENT;
14026 		goto vector_setup_out;
14027 	}
14028 
14029 vector_setup_out:
14030 	return ret;
14031 }
14032 
14033 /**
14034  * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
14035  * @vsi: pointer to the vsi.
14036  *
14037  * This re-allocates a vsi's queue resources.
14038  *
14039  * Returns pointer to the successfully allocated and configured VSI sw struct
14040  * on success, otherwise returns NULL on failure.
14041  **/
14042 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14043 {
14044 	u16 alloc_queue_pairs;
14045 	struct i40e_pf *pf;
14046 	u8 enabled_tc;
14047 	int ret;
14048 
14049 	if (!vsi)
14050 		return NULL;
14051 
14052 	pf = vsi->back;
14053 
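	/* Give back the previously reserved queue range and free the rings
	 * so they can be re-sized for the new configuration.
	 */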
14054 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14055 	i40e_vsi_clear_rings(vsi);
14056 
14057 	i40e_vsi_free_arrays(vsi, false);
14058 	i40e_set_num_rings_in_vsi(vsi);
14059 	ret = i40e_vsi_alloc_arrays(vsi, false);
14060 	if (ret)
14061 		goto err_vsi;
14062 
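	/* Each queue pair needs an extra XDP Tx ring when an XDP program is
	 * attached, so reserve twice as many queue pairs in that case.
	 */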
14063 	alloc_queue_pairs = vsi->alloc_queue_pairs *
14064 			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14065 
14066 	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14067 	if (ret < 0) {
14068 		dev_info(&pf->pdev->dev,
14069 			 "failed to get tracking for %d queues for VSI %d err %d\n",
14070 			 alloc_queue_pairs, vsi->seid, ret);
14071 		goto err_vsi;
14072 	}
14073 	vsi->base_queue = ret;
14074 
14075 	/* Update the FW view of the VSI. Force a reset of TC and queue
14076 	 * layout configurations.
14077 	 */
14078 	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14079 	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14080 	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14081 	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14082 	if (vsi->type == I40E_VSI_MAIN)
14083 		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14084 
14085 	/* assign it some queues */
14086 	ret = i40e_alloc_rings(vsi);
14087 	if (ret)
14088 		goto err_rings;
14089 
14090 	/* map all of the rings to the q_vectors */
14091 	i40e_vsi_map_rings_to_vectors(vsi);
14092 	return vsi;
14093 
14094 err_rings:
14095 	i40e_vsi_free_q_vectors(vsi);
14096 	if (vsi->netdev_registered) {
14097 		vsi->netdev_registered = false;
14098 		unregister_netdev(vsi->netdev);
14099 		free_netdev(vsi->netdev);
14100 		vsi->netdev = NULL;
14101 	}
14102 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14103 err_vsi:
14104 	i40e_vsi_clear(vsi);
14105 	return NULL;
14106 }
14107 
14108 /**
14109  * i40e_vsi_setup - Set up a VSI by a given type
14110  * @pf: board private structure
14111  * @type: VSI type
14112  * @uplink_seid: the switch element to link to
14113  * @param1: usage depends upon VSI type. For VF types, indicates VF id
14114  *
 * This allocates the sw VSI structure and its queue resources, then adds a VSI
14116  * to the identified VEB.
14117  *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
14119  * success, otherwise returns NULL on failure.
14120  **/
14121 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14122 				u16 uplink_seid, u32 param1)
14123 {
14124 	struct i40e_vsi *vsi = NULL;
14125 	struct i40e_veb *veb = NULL;
14126 	u16 alloc_queue_pairs;
14127 	int ret, i;
14128 	int v_idx;
14129 
14130 	/* The requested uplink_seid must be either
14131 	 *     - the PF's port seid
14132 	 *              no VEB is needed because this is the PF
14133 	 *              or this is a Flow Director special case VSI
14134 	 *     - seid of an existing VEB
14135 	 *     - seid of a VSI that owns an existing VEB
14136 	 *     - seid of a VSI that doesn't own a VEB
14137 	 *              a new VEB is created and the VSI becomes the owner
14138 	 *     - seid of the PF VSI, which is what creates the first VEB
14139 	 *              this is a special case of the previous
14140 	 *
14141 	 * Find which uplink_seid we were given and create a new VEB if needed
14142 	 */
14143 	for (i = 0; i < I40E_MAX_VEB; i++) {
14144 		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14145 			veb = pf->veb[i];
14146 			break;
14147 		}
14148 	}
14149 
14150 	if (!veb && uplink_seid != pf->mac_seid) {
14151 
14152 		for (i = 0; i < pf->num_alloc_vsi; i++) {
14153 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14154 				vsi = pf->vsi[i];
14155 				break;
14156 			}
14157 		}
14158 		if (!vsi) {
14159 			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14160 				 uplink_seid);
14161 			return NULL;
14162 		}
14163 
14164 		if (vsi->uplink_seid == pf->mac_seid)
14165 			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14166 					     vsi->tc_config.enabled_tc);
14167 		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14168 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14169 					     vsi->tc_config.enabled_tc);
14170 		if (veb) {
14171 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14172 				dev_info(&vsi->back->pdev->dev,
14173 					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14174 				return NULL;
14175 			}
14176 			/* We come up by default in VEPA mode if SRIOV is not
14177 			 * already enabled, in which case we can't force VEPA
14178 			 * mode.
14179 			 */
14180 			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14181 				veb->bridge_mode = BRIDGE_MODE_VEPA;
14182 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14183 			}
14184 			i40e_config_bridge_mode(veb);
14185 		}
14186 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14187 			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14188 				veb = pf->veb[i];
14189 		}
14190 		if (!veb) {
14191 			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14192 			return NULL;
14193 		}
14194 
14195 		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14196 		uplink_seid = veb->seid;
14197 	}
14198 
14199 	/* get vsi sw struct */
14200 	v_idx = i40e_vsi_mem_alloc(pf, type);
14201 	if (v_idx < 0)
14202 		goto err_alloc;
14203 	vsi = pf->vsi[v_idx];
14204 	if (!vsi)
14205 		goto err_alloc;
14206 	vsi->type = type;
14207 	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14208 
14209 	if (type == I40E_VSI_MAIN)
14210 		pf->lan_vsi = v_idx;
14211 	else if (type == I40E_VSI_SRIOV)
14212 		vsi->vf_id = param1;
14213 	/* assign it some queues */
14214 	alloc_queue_pairs = vsi->alloc_queue_pairs *
14215 			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14216 
14217 	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14218 	if (ret < 0) {
14219 		dev_info(&pf->pdev->dev,
14220 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
14221 			 alloc_queue_pairs, vsi->seid, ret);
14222 		goto err_vsi;
14223 	}
14224 	vsi->base_queue = ret;
14225 
14226 	/* get a VSI from the hardware */
14227 	vsi->uplink_seid = uplink_seid;
14228 	ret = i40e_add_vsi(vsi);
14229 	if (ret)
14230 		goto err_vsi;
14231 
14232 	switch (vsi->type) {
14233 	/* setup the netdev if needed */
14234 	case I40E_VSI_MAIN:
14235 	case I40E_VSI_VMDQ2:
14236 		ret = i40e_config_netdev(vsi);
14237 		if (ret)
14238 			goto err_netdev;
14239 		ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14240 		if (ret)
14241 			goto err_netdev;
14242 		ret = register_netdev(vsi->netdev);
14243 		if (ret)
14244 			goto err_netdev;
14245 		vsi->netdev_registered = true;
14246 		netif_carrier_off(vsi->netdev);
14247 #ifdef CONFIG_I40E_DCB
14248 		/* Setup DCB netlink interface */
14249 		i40e_dcbnl_setup(vsi);
14250 #endif /* CONFIG_I40E_DCB */
14251 		fallthrough;
14252 	case I40E_VSI_FDIR:
14253 		/* set up vectors and rings if needed */
14254 		ret = i40e_vsi_setup_vectors(vsi);
14255 		if (ret)
14256 			goto err_msix;
14257 
14258 		ret = i40e_alloc_rings(vsi);
14259 		if (ret)
14260 			goto err_rings;
14261 
14262 		/* map all of the rings to the q_vectors */
14263 		i40e_vsi_map_rings_to_vectors(vsi);
14264 
14265 		i40e_vsi_reset_stats(vsi);
14266 		break;
14267 	default:
14268 		/* no netdev or rings for the other VSI types */
14269 		break;
14270 	}
14271 
14272 	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14273 	    (vsi->type == I40E_VSI_VMDQ2)) {
14274 		ret = i40e_vsi_config_rss(vsi);
14275 	}
14276 	return vsi;
14277 
14278 err_rings:
14279 	i40e_vsi_free_q_vectors(vsi);
14280 err_msix:
14281 	if (vsi->netdev_registered) {
14282 		vsi->netdev_registered = false;
14283 		unregister_netdev(vsi->netdev);
14284 		free_netdev(vsi->netdev);
14285 		vsi->netdev = NULL;
14286 	}
14287 err_netdev:
14288 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14289 err_vsi:
14290 	i40e_vsi_clear(vsi);
14291 err_alloc:
14292 	return NULL;
14293 }
14294 
14295 /**
14296  * i40e_veb_get_bw_info - Query VEB BW information
14297  * @veb: the veb to query
14298  *
 * Query the Tx scheduler BW configuration data for the given VEB
14300  **/
14301 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14302 {
14303 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14304 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14305 	struct i40e_pf *pf = veb->pf;
14306 	struct i40e_hw *hw = &pf->hw;
14307 	u32 tc_bw_max;
14308 	int ret = 0;
14309 	int i;
14310 
14311 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14312 						  &bw_data, NULL);
14313 	if (ret) {
14314 		dev_info(&pf->pdev->dev,
14315 			 "query veb bw config failed, err %s aq_err %s\n",
14316 			 i40e_stat_str(&pf->hw, ret),
14317 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14318 		goto out;
14319 	}
14320 
14321 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14322 						   &ets_data, NULL);
14323 	if (ret) {
14324 		dev_info(&pf->pdev->dev,
14325 			 "query veb bw ets config failed, err %s aq_err %s\n",
14326 			 i40e_stat_str(&pf->hw, ret),
14327 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14328 		goto out;
14329 	}
14330 
14331 	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14332 	veb->bw_max_quanta = ets_data.tc_bw_max;
14333 	veb->is_abs_credits = bw_data.absolute_credits_enable;
14334 	veb->enabled_tc = ets_data.tc_valid_bits;
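	/* The per-TC max quanta come back as two little-endian 16-bit words;
	 * merge them into one 32-bit value before extracting each TC's field.
	 */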
14335 	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14336 		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14337 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14338 		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14339 		veb->bw_tc_limit_credits[i] =
14340 					le16_to_cpu(bw_data.tc_bw_limits[i]);
14341 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
14342 	}
14343 
14344 out:
14345 	return ret;
14346 }
14347 
14348 /**
14349  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14350  * @pf: board private structure
14351  *
14352  * On error: returns error code (negative)
 * On success: returns veb index in PF (non-negative)
14354  **/
14355 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14356 {
14357 	int ret = -ENOENT;
14358 	struct i40e_veb *veb;
14359 	int i;
14360 
14361 	/* Need to protect the allocation of switch elements at the PF level */
14362 	mutex_lock(&pf->switch_mutex);
14363 
14364 	/* VEB list may be fragmented if VEB creation/destruction has
14365 	 * been happening.  We can afford to do a quick scan to look
14366 	 * for any free slots in the list.
14367 	 *
14368 	 * find next empty veb slot, looping back around if necessary
14369 	 */
14370 	i = 0;
14371 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14372 		i++;
14373 	if (i >= I40E_MAX_VEB) {
14374 		ret = -ENOMEM;
14375 		goto err_alloc_veb;  /* out of VEB slots! */
14376 	}
14377 
14378 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14379 	if (!veb) {
14380 		ret = -ENOMEM;
14381 		goto err_alloc_veb;
14382 	}
14383 	veb->pf = pf;
14384 	veb->idx = i;
14385 	veb->enabled_tc = 1;
14386 
14387 	pf->veb[i] = veb;
14388 	ret = i;
14389 err_alloc_veb:
14390 	mutex_unlock(&pf->switch_mutex);
14391 	return ret;
14392 }
14393 
14394 /**
14395  * i40e_switch_branch_release - Delete a branch of the switch tree
14396  * @branch: where to start deleting
14397  *
14398  * This uses recursion to find the tips of the branch to be
14399  * removed, deleting until we get back to and can delete this VEB.
14400  **/
14401 static void i40e_switch_branch_release(struct i40e_veb *branch)
14402 {
14403 	struct i40e_pf *pf = branch->pf;
14404 	u16 branch_seid = branch->seid;
14405 	u16 veb_idx = branch->idx;
14406 	int i;
14407 
14408 	/* release any VEBs on this VEB - RECURSION */
14409 	for (i = 0; i < I40E_MAX_VEB; i++) {
14410 		if (!pf->veb[i])
14411 			continue;
14412 		if (pf->veb[i]->uplink_seid == branch->seid)
14413 			i40e_switch_branch_release(pf->veb[i]);
14414 	}
14415 
14416 	/* Release the VSIs on this VEB, but not the owner VSI.
14417 	 *
14418 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14419 	 *       the VEB itself, so don't use (*branch) after this loop.
14420 	 */
14421 	for (i = 0; i < pf->num_alloc_vsi; i++) {
14422 		if (!pf->vsi[i])
14423 			continue;
14424 		if (pf->vsi[i]->uplink_seid == branch_seid &&
14425 		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14426 			i40e_vsi_release(pf->vsi[i]);
14427 		}
14428 	}
14429 
14430 	/* There's one corner case where the VEB might not have been
14431 	 * removed, so double check it here and remove it if needed.
14432 	 * This case happens if the veb was created from the debugfs
14433 	 * commands and no VSIs were added to it.
14434 	 */
14435 	if (pf->veb[veb_idx])
14436 		i40e_veb_release(pf->veb[veb_idx]);
14437 }
14438 
14439 /**
14440  * i40e_veb_clear - remove veb struct
14441  * @veb: the veb to remove
14442  **/
14443 static void i40e_veb_clear(struct i40e_veb *veb)
14444 {
14445 	if (!veb)
14446 		return;
14447 
14448 	if (veb->pf) {
14449 		struct i40e_pf *pf = veb->pf;
14450 
14451 		mutex_lock(&pf->switch_mutex);
14452 		if (pf->veb[veb->idx] == veb)
14453 			pf->veb[veb->idx] = NULL;
14454 		mutex_unlock(&pf->switch_mutex);
14455 	}
14456 
14457 	kfree(veb);
14458 }
14459 
14460 /**
14461  * i40e_veb_release - Delete a VEB and free its resources
14462  * @veb: the VEB being removed
14463  **/
14464 void i40e_veb_release(struct i40e_veb *veb)
14465 {
14466 	struct i40e_vsi *vsi = NULL;
14467 	struct i40e_pf *pf;
14468 	int i, n = 0;
14469 
14470 	pf = veb->pf;
14471 
14472 	/* find the remaining VSI and check for extras */
14473 	for (i = 0; i < pf->num_alloc_vsi; i++) {
14474 		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14475 			n++;
14476 			vsi = pf->vsi[i];
14477 		}
14478 	}
14479 	if (n != 1) {
14480 		dev_info(&pf->pdev->dev,
14481 			 "can't remove VEB %d with %d VSIs left\n",
14482 			 veb->seid, n);
14483 		return;
14484 	}
14485 
14486 	/* move the remaining VSI to uplink veb */
14487 	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14488 	if (veb->uplink_seid) {
14489 		vsi->uplink_seid = veb->uplink_seid;
14490 		if (veb->uplink_seid == pf->mac_seid)
14491 			vsi->veb_idx = I40E_NO_VEB;
14492 		else
14493 			vsi->veb_idx = veb->veb_idx;
14494 	} else {
14495 		/* floating VEB */
14496 		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14497 		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14498 	}
14499 
14500 	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14501 	i40e_veb_clear(veb);
14502 }
14503 
14504 /**
14505  * i40e_add_veb - create the VEB in the switch
14506  * @veb: the VEB to be instantiated
14507  * @vsi: the controlling VSI
14508  **/
14509 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14510 {
14511 	struct i40e_pf *pf = veb->pf;
14512 	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14513 	int ret;
14514 
14515 	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14516 			      veb->enabled_tc, false,
14517 			      &veb->seid, enable_stats, NULL);
14518 
14519 	/* get a VEB from the hardware */
14520 	if (ret) {
14521 		dev_info(&pf->pdev->dev,
14522 			 "couldn't add VEB, err %s aq_err %s\n",
14523 			 i40e_stat_str(&pf->hw, ret),
14524 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14525 		return -EPERM;
14526 	}
14527 
14528 	/* get statistics counter */
14529 	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14530 					 &veb->stats_idx, NULL, NULL, NULL);
14531 	if (ret) {
14532 		dev_info(&pf->pdev->dev,
14533 			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14534 			 i40e_stat_str(&pf->hw, ret),
14535 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14536 		return -EPERM;
14537 	}
14538 	ret = i40e_veb_get_bw_info(veb);
14539 	if (ret) {
14540 		dev_info(&pf->pdev->dev,
14541 			 "couldn't get VEB bw info, err %s aq_err %s\n",
14542 			 i40e_stat_str(&pf->hw, ret),
14543 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14544 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14545 		return -ENOENT;
14546 	}
14547 
14548 	vsi->uplink_seid = veb->seid;
14549 	vsi->veb_idx = veb->idx;
14550 	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14551 
14552 	return 0;
14553 }
14554 
14555 /**
14556  * i40e_veb_setup - Set up a VEB
14557  * @pf: board private structure
14558  * @flags: VEB setup flags
14559  * @uplink_seid: the switch element to link to
14560  * @vsi_seid: the initial VSI seid
14561  * @enabled_tc: Enabled TC bit-map
14562  *
14563  * This allocates the sw VEB structure and links it into the switch
14564  * It is possible and legal for this to be a duplicate of an already
14565  * existing VEB.  It is also possible for both uplink and vsi seids
14566  * to be zero, in order to create a floating VEB.
14567  *
14568  * Returns pointer to the successfully allocated VEB sw struct on
14569  * success, otherwise returns NULL on failure.
14570  **/
14571 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14572 				u16 uplink_seid, u16 vsi_seid,
14573 				u8 enabled_tc)
14574 {
14575 	struct i40e_veb *veb, *uplink_veb = NULL;
14576 	int vsi_idx, veb_idx;
14577 	int ret;
14578 
14579 	/* if one seid is 0, the other must be 0 to create a floating relay */
14580 	if ((uplink_seid == 0 || vsi_seid == 0) &&
14581 	    (uplink_seid + vsi_seid != 0)) {
14582 		dev_info(&pf->pdev->dev,
			 "one, not both seids are 0: uplink=%d vsi=%d\n",
14584 			 uplink_seid, vsi_seid);
14585 		return NULL;
14586 	}
14587 
14588 	/* make sure there is such a vsi and uplink */
14589 	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14590 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14591 			break;
14592 	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14593 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14594 			 vsi_seid);
14595 		return NULL;
14596 	}
14597 
14598 	if (uplink_seid && uplink_seid != pf->mac_seid) {
14599 		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14600 			if (pf->veb[veb_idx] &&
14601 			    pf->veb[veb_idx]->seid == uplink_seid) {
14602 				uplink_veb = pf->veb[veb_idx];
14603 				break;
14604 			}
14605 		}
14606 		if (!uplink_veb) {
14607 			dev_info(&pf->pdev->dev,
14608 				 "uplink seid %d not found\n", uplink_seid);
14609 			return NULL;
14610 		}
14611 	}
14612 
14613 	/* get veb sw struct */
14614 	veb_idx = i40e_veb_mem_alloc(pf);
14615 	if (veb_idx < 0)
14616 		goto err_alloc;
14617 	veb = pf->veb[veb_idx];
14618 	veb->flags = flags;
14619 	veb->uplink_seid = uplink_seid;
14620 	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14621 	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14622 
14623 	/* create the VEB in the switch */
14624 	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14625 	if (ret)
14626 		goto err_veb;
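	/* If this VEB now sits above the LAN VSI, record it as the LAN VEB */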
14627 	if (vsi_idx == pf->lan_vsi)
14628 		pf->lan_veb = veb->idx;
14629 
14630 	return veb;
14631 
14632 err_veb:
14633 	i40e_veb_clear(veb);
14634 err_alloc:
14635 	return NULL;
14636 }
14637 
14638 /**
14639  * i40e_setup_pf_switch_element - set PF vars based on switch type
14640  * @pf: board private structure
14641  * @ele: element we are building info from
14642  * @num_reported: total number of elements
14643  * @printconfig: should we print the contents
14644  *
 * Helper function to extract a few useful SEID values.
14646  **/
14647 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14648 				struct i40e_aqc_switch_config_element_resp *ele,
14649 				u16 num_reported, bool printconfig)
14650 {
14651 	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14652 	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14653 	u8 element_type = ele->element_type;
14654 	u16 seid = le16_to_cpu(ele->seid);
14655 
14656 	if (printconfig)
14657 		dev_info(&pf->pdev->dev,
14658 			 "type=%d seid=%d uplink=%d downlink=%d\n",
14659 			 element_type, seid, uplink_seid, downlink_seid);
14660 
14661 	switch (element_type) {
14662 	case I40E_SWITCH_ELEMENT_TYPE_MAC:
14663 		pf->mac_seid = seid;
14664 		break;
14665 	case I40E_SWITCH_ELEMENT_TYPE_VEB:
14666 		/* Main VEB? */
14667 		if (uplink_seid != pf->mac_seid)
14668 			break;
14669 		if (pf->lan_veb >= I40E_MAX_VEB) {
14670 			int v;
14671 
14672 			/* find existing or else empty VEB */
14673 			for (v = 0; v < I40E_MAX_VEB; v++) {
14674 				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14675 					pf->lan_veb = v;
14676 					break;
14677 				}
14678 			}
14679 			if (pf->lan_veb >= I40E_MAX_VEB) {
14680 				v = i40e_veb_mem_alloc(pf);
14681 				if (v < 0)
14682 					break;
14683 				pf->lan_veb = v;
14684 			}
14685 		}
14686 		if (pf->lan_veb >= I40E_MAX_VEB)
14687 			break;
14688 
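		/* Fill the pre-allocated LAN VEB slot with what the firmware
		 * reported for this switch element.
		 */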
14689 		pf->veb[pf->lan_veb]->seid = seid;
14690 		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14691 		pf->veb[pf->lan_veb]->pf = pf;
14692 		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14693 		break;
14694 	case I40E_SWITCH_ELEMENT_TYPE_VSI:
14695 		if (num_reported != 1)
14696 			break;
14697 		/* This is immediately after a reset so we can assume this is
14698 		 * the PF's VSI
14699 		 */
14700 		pf->mac_seid = uplink_seid;
14701 		pf->pf_seid = downlink_seid;
14702 		pf->main_vsi_seid = seid;
14703 		if (printconfig)
14704 			dev_info(&pf->pdev->dev,
14705 				 "pf_seid=%d main_vsi_seid=%d\n",
14706 				 pf->pf_seid, pf->main_vsi_seid);
14707 		break;
14708 	case I40E_SWITCH_ELEMENT_TYPE_PF:
14709 	case I40E_SWITCH_ELEMENT_TYPE_VF:
14710 	case I40E_SWITCH_ELEMENT_TYPE_EMP:
14711 	case I40E_SWITCH_ELEMENT_TYPE_BMC:
14712 	case I40E_SWITCH_ELEMENT_TYPE_PE:
14713 	case I40E_SWITCH_ELEMENT_TYPE_PA:
14714 		/* ignore these for now */
14715 		break;
14716 	default:
14717 		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14718 			 element_type, seid);
14719 		break;
14720 	}
14721 }
14722 
14723 /**
14724  * i40e_fetch_switch_configuration - Get switch config from firmware
14725  * @pf: board private structure
14726  * @printconfig: should we print the contents
14727  *
14728  * Get the current switch configuration from the device and
14729  * extract a few useful SEID values.
14730  **/
14731 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14732 {
14733 	struct i40e_aqc_get_switch_config_resp *sw_config;
14734 	u16 next_seid = 0;
14735 	int ret = 0;
14736 	u8 *aq_buf;
14737 	int i;
14738 
14739 	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14740 	if (!aq_buf)
14741 		return -ENOMEM;
14742 
14743 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
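	/* The switch configuration may span several AQ responses; next_seid
	 * acts as a cursor and is returned as zero once the final chunk has
	 * been read.
	 */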
14744 	do {
14745 		u16 num_reported, num_total;
14746 
14747 		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14748 						I40E_AQ_LARGE_BUF,
14749 						&next_seid, NULL);
14750 		if (ret) {
14751 			dev_info(&pf->pdev->dev,
14752 				 "get switch config failed err %s aq_err %s\n",
14753 				 i40e_stat_str(&pf->hw, ret),
14754 				 i40e_aq_str(&pf->hw,
14755 					     pf->hw.aq.asq_last_status));
14756 			kfree(aq_buf);
14757 			return -ENOENT;
14758 		}
14759 
14760 		num_reported = le16_to_cpu(sw_config->header.num_reported);
14761 		num_total = le16_to_cpu(sw_config->header.num_total);
14762 
14763 		if (printconfig)
14764 			dev_info(&pf->pdev->dev,
14765 				 "header: %d reported %d total\n",
14766 				 num_reported, num_total);
14767 
14768 		for (i = 0; i < num_reported; i++) {
14769 			struct i40e_aqc_switch_config_element_resp *ele =
14770 				&sw_config->element[i];
14771 
14772 			i40e_setup_pf_switch_element(pf, ele, num_reported,
14773 						     printconfig);
14774 		}
14775 	} while (next_seid != 0);
14776 
14777 	kfree(aq_buf);
14778 	return ret;
14779 }
14780 
14781 /**
14782  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14783  * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
14785  * @lock_acquired: indicates whether or not the lock has been acquired
14786  *
14787  * Returns 0 on success, negative value on failure
14788  **/
14789 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
14790 {
14791 	u16 flags = 0;
14792 	int ret;
14793 
14794 	/* find out what's out there already */
14795 	ret = i40e_fetch_switch_configuration(pf, false);
14796 	if (ret) {
14797 		dev_info(&pf->pdev->dev,
14798 			 "couldn't fetch switch config, err %s aq_err %s\n",
14799 			 i40e_stat_str(&pf->hw, ret),
14800 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14801 		return ret;
14802 	}
14803 	i40e_pf_reset_stats(pf);
14804 
	/* Set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when the user requests promisc. The default is limited
	 * promisc.
	 */
14810 
14811 	if ((pf->hw.pf_id == 0) &&
14812 	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14813 		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14814 		pf->last_sw_conf_flags = flags;
14815 	}
14816 
14817 	if (pf->hw.pf_id == 0) {
14818 		u16 valid_flags;
14819 
14820 		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14821 		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14822 						NULL);
14823 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14824 			dev_info(&pf->pdev->dev,
14825 				 "couldn't set switch config bits, err %s aq_err %s\n",
14826 				 i40e_stat_str(&pf->hw, ret),
14827 				 i40e_aq_str(&pf->hw,
14828 					     pf->hw.aq.asq_last_status));
14829 			/* not a fatal problem, just keep going */
14830 		}
14831 		pf->last_sw_conf_valid_flags = valid_flags;
14832 	}
14833 
14834 	/* first time setup */
14835 	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14836 		struct i40e_vsi *vsi = NULL;
14837 		u16 uplink_seid;
14838 
14839 		/* Set up the PF VSI associated with the PF's main VSI
14840 		 * that is already in the HW switch
14841 		 */
14842 		if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14843 			uplink_seid = pf->veb[pf->lan_veb]->seid;
14844 		else
14845 			uplink_seid = pf->mac_seid;
14846 		if (pf->lan_vsi == I40E_NO_VSI)
14847 			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14848 		else if (reinit)
14849 			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14850 		if (!vsi) {
14851 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14852 			i40e_cloud_filter_exit(pf);
14853 			i40e_fdir_teardown(pf);
14854 			return -EAGAIN;
14855 		}
14856 	} else {
14857 		/* force a reset of TC and queue layout configurations */
14858 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14859 
14860 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14861 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14862 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14863 	}
14864 	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14865 
14866 	i40e_fdir_sb_setup(pf);
14867 
14868 	/* Setup static PF queue filter control settings */
14869 	ret = i40e_setup_pf_filter_control(pf);
14870 	if (ret) {
14871 		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14872 			 ret);
14873 		/* Failure here should not stop continuing other steps */
14874 	}
14875 
14876 	/* enable RSS in the HW, even for only one queue, as the stack can use
14877 	 * the hash
14878 	 */
14879 	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14880 		i40e_pf_config_rss(pf);
14881 
14882 	/* fill in link information and enable LSE reporting */
14883 	i40e_link_event(pf);
14884 
14885 	/* Initialize user-specific link properties */
14886 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14887 				  I40E_AQ_AN_COMPLETED) ? true : false);
14888 
14889 	i40e_ptp_init(pf);
14890 
14891 	if (!lock_acquired)
14892 		rtnl_lock();
14893 
14894 	/* repopulate tunnel port filters */
14895 	udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
14896 
14897 	if (!lock_acquired)
14898 		rtnl_unlock();
14899 
14900 	return ret;
14901 }
14902 
14903 /**
14904  * i40e_determine_queue_usage - Work out queue distribution
14905  * @pf: board private structure
14906  **/
14907 static void i40e_determine_queue_usage(struct i40e_pf *pf)
14908 {
14909 	int queues_left;
14910 	int q_max;
14911 
14912 	pf->num_lan_qps = 0;
14913 
14914 	/* Find the max queues to be put into basic use.  We'll always be
14915 	 * using TC0, whether or not DCB is running, and TC0 will get the
14916 	 * big RSS set.
14917 	 */
14918 	queues_left = pf->hw.func_caps.num_tx_qp;
14919 
14920 	if ((queues_left == 1) ||
14921 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14922 		/* one qp for PF, no queues for anything else */
14923 		queues_left = 0;
14924 		pf->alloc_rss_size = pf->num_lan_qps = 1;
14925 
14926 		/* make sure all the fancies are disabled */
14927 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
14928 			       I40E_FLAG_IWARP_ENABLED	|
14929 			       I40E_FLAG_FD_SB_ENABLED	|
14930 			       I40E_FLAG_FD_ATR_ENABLED	|
14931 			       I40E_FLAG_DCB_CAPABLE	|
14932 			       I40E_FLAG_DCB_ENABLED	|
14933 			       I40E_FLAG_SRIOV_ENABLED	|
14934 			       I40E_FLAG_VMDQ_ENABLED);
14935 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14936 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14937 				  I40E_FLAG_FD_SB_ENABLED |
14938 				  I40E_FLAG_FD_ATR_ENABLED |
14939 				  I40E_FLAG_DCB_CAPABLE))) {
14940 		/* one qp for PF */
14941 		pf->alloc_rss_size = pf->num_lan_qps = 1;
14942 		queues_left -= pf->num_lan_qps;
14943 
14944 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
14945 			       I40E_FLAG_IWARP_ENABLED	|
14946 			       I40E_FLAG_FD_SB_ENABLED	|
14947 			       I40E_FLAG_FD_ATR_ENABLED	|
14948 			       I40E_FLAG_DCB_ENABLED	|
14949 			       I40E_FLAG_VMDQ_ENABLED);
14950 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14951 	} else {
14952 		/* Not enough queues for all TCs */
14953 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14954 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14955 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14956 					I40E_FLAG_DCB_ENABLED);
14957 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14958 		}
14959 
		/* limit lan qps: start from the larger of rss_size_max and
		 * the number of online CPUs, then cap it at the available
		 * queue pairs and MSI-X vectors
		 */
14961 		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14962 		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14963 		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14964 		pf->num_lan_qps = q_max;
14965 
14966 		queues_left -= pf->num_lan_qps;
14967 	}
14968 
14969 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14970 		if (queues_left > 1) {
14971 			queues_left -= 1; /* save 1 queue for FD */
14972 		} else {
14973 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14974 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14975 			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14976 		}
14977 	}
14978 
14979 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14980 	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14981 		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14982 					(queues_left / pf->num_vf_qps));
14983 		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14984 	}
14985 
14986 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14987 	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14988 		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14989 					  (queues_left / pf->num_vmdq_qps));
14990 		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14991 	}
14992 
14993 	pf->queues_left = queues_left;
14994 	dev_dbg(&pf->pdev->dev,
14995 		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
14996 		pf->hw.func_caps.num_tx_qp,
14997 		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
14998 		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
14999 		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
15000 		queues_left);
15001 }
15002 
15003 /**
15004  * i40e_setup_pf_filter_control - Setup PF static filter control
15005  * @pf: PF to be setup
15006  *
15007  * i40e_setup_pf_filter_control sets up a PF's initial filter control
15008  * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow Director,
 * ethertype and macvlan type filter settings for the PF.
15011  *
15012  * Returns 0 on success, negative on failure
15013  **/
15014 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
15015 {
15016 	struct i40e_filter_control_settings *settings = &pf->filter_settings;
15017 
15018 	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
15019 
15020 	/* Flow Director is enabled */
15021 	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
15022 		settings->enable_fdir = true;
15023 
15024 	/* Ethtype and MACVLAN filters enabled for PF */
15025 	settings->enable_ethtype = true;
15026 	settings->enable_macvlan = true;
15027 
15028 	if (i40e_set_filter_control(&pf->hw, settings))
15029 		return -ENOENT;
15030 
15031 	return 0;
15032 }
15033 
15034 #define INFO_STRING_LEN 255
15035 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
15036 static void i40e_print_features(struct i40e_pf *pf)
15037 {
15038 	struct i40e_hw *hw = &pf->hw;
15039 	char *buf;
15040 	int i;
15041 
15042 	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
15043 	if (!buf)
15044 		return;
15045 
15046 	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
15047 #ifdef CONFIG_PCI_IOV
15048 	i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
15049 #endif
15050 	i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
15051 		      pf->hw.func_caps.num_vsis,
15052 		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
15053 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
15054 		i += scnprintf(&buf[i], REMAIN(i), " RSS");
15055 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
15056 		i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
15057 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
15058 		i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
15059 		i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
15060 	}
15061 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
15062 		i += scnprintf(&buf[i], REMAIN(i), " DCB");
15063 	i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
15064 	i += scnprintf(&buf[i], REMAIN(i), " Geneve");
15065 	if (pf->flags & I40E_FLAG_PTP)
15066 		i += scnprintf(&buf[i], REMAIN(i), " PTP");
15067 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
15068 		i += scnprintf(&buf[i], REMAIN(i), " VEB");
15069 	else
15070 		i += scnprintf(&buf[i], REMAIN(i), " VEPA");
15071 
15072 	dev_info(&pf->pdev->dev, "%s\n", buf);
15073 	kfree(buf);
15074 	WARN_ON(i > INFO_STRING_LEN);
15075 }
15076 
15077 /**
15078  * i40e_get_platform_mac_addr - get platform-specific MAC address
15079  * @pdev: PCI device information struct
15080  * @pf: board private structure
15081  *
15082  * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware or an
 * arch-specific fallback. Otherwise, we'll default to the value stored in
15085  * firmware.
15086  **/
15087 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
15088 {
15089 	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
15090 		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
15091 }
15092 
15093 /**
15094  * i40e_set_fec_in_flags - helper function for setting FEC options in flags
15095  * @fec_cfg: FEC option to set in flags
15096  * @flags: ptr to flags in which we set FEC option
15097  **/
15098 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
15099 {
15100 	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
15101 		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
15102 	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
15103 	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
15104 		*flags |= I40E_FLAG_RS_FEC;
15105 		*flags &= ~I40E_FLAG_BASE_R_FEC;
15106 	}
15107 	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
15108 	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
15109 		*flags |= I40E_FLAG_BASE_R_FEC;
15110 		*flags &= ~I40E_FLAG_RS_FEC;
15111 	}
15112 	if (fec_cfg == 0)
15113 		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
15114 }
15115 
15116 /**
15117  * i40e_check_recovery_mode - check if we are running transition firmware
15118  * @pf: board private structure
15119  *
15120  * Check registers indicating the firmware runs in recovery mode. Sets the
15121  * appropriate driver state.
15122  *
15123  * Returns true if the recovery mode was detected, false otherwise
15124  **/
15125 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
15126 {
15127 	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
15128 
15129 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
15130 		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
15131 		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
15132 		set_bit(__I40E_RECOVERY_MODE, pf->state);
15133 
15134 		return true;
15135 	}
15136 	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15137 		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
15138 
15139 	return false;
15140 }
15141 
15142 /**
15143  * i40e_pf_loop_reset - perform reset in a loop.
15144  * @pf: board private structure
15145  *
 * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted the NIC's
 * firmware enters recovery mode, but right after a POR it takes about
 * 7 minutes for the firmware to get there. Until then the NIC is in an
 * intermediate state; after that period it almost surely enters
 * recovery mode. The only way for the driver to detect the intermediate
 * state is to issue a series of PF resets and check the return values.
 * A successful PF reset may still mean the firmware is in recovery
 * mode, so the caller must check for recovery mode whenever this
 * function returns success. There is a small chance that the firmware
 * hangs in the intermediate state forever.
 * Since waiting the full 7 minutes is too long, this function waits
 * 10 seconds and then gives up by returning an error.
15160  *
15161  * Return 0 on success, negative on failure.
15162  **/
15163 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
15164 {
15165 	/* wait max 10 seconds for PF reset to succeed */
15166 	const unsigned long time_end = jiffies + 10 * HZ;
15167 
15168 	struct i40e_hw *hw = &pf->hw;
15169 	i40e_status ret;
15170 
15171 	ret = i40e_pf_reset(hw);
15172 	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
15173 		usleep_range(10000, 20000);
15174 		ret = i40e_pf_reset(hw);
15175 	}
15176 
15177 	if (ret == I40E_SUCCESS)
15178 		pf->pfr_count++;
15179 	else
15180 		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
15181 
15182 	return ret;
15183 }
15184 
15185 /**
15186  * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15187  * @pf: board private structure
15188  *
15189  * Check FW registers to determine if FW issued unexpected EMP Reset.
 * Every time an unexpected EMP Reset occurs, the FW increments
 * a counter of unexpected EMP Resets. When the counter reaches 10
 * the FW should enter Recovery mode.
15193  *
15194  * Returns true if FW issued unexpected EMP Reset
15195  **/
15196 static bool i40e_check_fw_empr(struct i40e_pf *pf)
15197 {
15198 	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
15199 			   I40E_GL_FWSTS_FWS1B_MASK;
15200 	return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
15201 	       (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
15202 }
15203 
15204 /**
15205  * i40e_handle_resets - handle EMP resets and PF resets
15206  * @pf: board private structure
15207  *
15208  * Handle both EMP resets and PF resets and conclude whether there are
15209  * any issues regarding these resets. If there are any issues then
 * generate a log entry.
15211  *
15212  * Return 0 if NIC is healthy or negative value when there are issues
15213  * with resets
15214  **/
15215 static i40e_status i40e_handle_resets(struct i40e_pf *pf)
15216 {
15217 	const i40e_status pfr = i40e_pf_loop_reset(pf);
15218 	const bool is_empr = i40e_check_fw_empr(pf);
15219 
15220 	if (is_empr || pfr != I40E_SUCCESS)
15221 		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
15222 
15223 	return is_empr ? I40E_ERR_RESET_FAILED : pfr;
15224 }
15225 
15226 /**
15227  * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15228  * @pf: board private structure
15229  * @hw: ptr to the hardware info
15230  *
15231  * This function does a minimal setup of all subsystems needed for running
15232  * recovery mode.
15233  *
15234  * Returns 0 on success, negative on failure
15235  **/
15236 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
15237 {
15238 	struct i40e_vsi *vsi;
15239 	int err;
15240 	int v_idx;
15241 
15242 	pci_save_state(pf->pdev);
15243 
15244 	/* set up periodic task facility */
15245 	timer_setup(&pf->service_timer, i40e_service_timer, 0);
15246 	pf->service_timer_period = HZ;
15247 
15248 	INIT_WORK(&pf->service_task, i40e_service_task);
15249 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
15250 
15251 	err = i40e_init_interrupt_scheme(pf);
15252 	if (err)
15253 		goto err_switch_setup;
15254 
15255 	/* The number of VSIs reported by the FW is the minimum guaranteed
15256 	 * to us; HW supports far more and we share the remaining pool with
15257 	 * the other PFs. We allocate space for more than the guarantee with
15258 	 * the understanding that we might not get them all later.
15259 	 */
15260 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15261 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15262 	else
15263 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15264 
15265 	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
15266 	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15267 			  GFP_KERNEL);
15268 	if (!pf->vsi) {
15269 		err = -ENOMEM;
15270 		goto err_switch_setup;
15271 	}
15272 
	/* We allocate one VSI, which is needed as an absolute minimum
	 * in order to register the netdev
15275 	 */
15276 	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
15277 	if (v_idx < 0) {
15278 		err = v_idx;
15279 		goto err_switch_setup;
15280 	}
15281 	pf->lan_vsi = v_idx;
15282 	vsi = pf->vsi[v_idx];
15283 	if (!vsi) {
15284 		err = -EFAULT;
15285 		goto err_switch_setup;
15286 	}
15287 	vsi->alloc_queue_pairs = 1;
15288 	err = i40e_config_netdev(vsi);
15289 	if (err)
15290 		goto err_switch_setup;
15291 	err = register_netdev(vsi->netdev);
15292 	if (err)
15293 		goto err_switch_setup;
15294 	vsi->netdev_registered = true;
15295 	i40e_dbg_pf_init(pf);
15296 
15297 	err = i40e_setup_misc_vector_for_recovery_mode(pf);
15298 	if (err)
15299 		goto err_switch_setup;
15300 
15301 	/* tell the firmware that we're starting */
15302 	i40e_send_version(pf);
15303 
15304 	/* since everything's happy, start the service_task timer */
15305 	mod_timer(&pf->service_timer,
15306 		  round_jiffies(jiffies + pf->service_timer_period));
15307 
15308 	return 0;
15309 
15310 err_switch_setup:
15311 	i40e_reset_interrupt_capability(pf);
15312 	del_timer_sync(&pf->service_timer);
15313 	i40e_shutdown_adminq(hw);
15314 	iounmap(hw->hw_addr);
15315 	pci_disable_pcie_error_reporting(pf->pdev);
15316 	pci_release_mem_regions(pf->pdev);
15317 	pci_disable_device(pf->pdev);
15318 	kfree(pf);
15319 
15320 	return err;
15321 }
15322 
15323 /**
15324  * i40e_set_subsystem_device_id - set subsystem device id
15325  * @hw: pointer to the hardware info
15326  *
 * Set the PCI subsystem device id from the pci_dev structure or, when
 * it is not populated there, from a specific FW register.
15329  **/
15330 static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
15331 {
15332 	struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
15333 
15334 	hw->subsystem_device_id = pdev->subsystem_device ?
15335 		pdev->subsystem_device :
15336 		(ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
15337 }
15338 
15339 /**
15340  * i40e_probe - Device initialization routine
15341  * @pdev: PCI device information struct
15342  * @ent: entry in i40e_pci_tbl
15343  *
15344  * i40e_probe initializes a PF identified by a pci_dev structure.
15345  * The OS initialization, configuring of the PF private structure,
15346  * and a hardware reset occur.
15347  *
15348  * Returns 0 on success, negative on failure
15349  **/
15350 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15351 {
15352 	struct i40e_aq_get_phy_abilities_resp abilities;
15353 #ifdef CONFIG_I40E_DCB
15354 	enum i40e_get_fw_lldp_status_resp lldp_status;
15355 	i40e_status status;
15356 #endif /* CONFIG_I40E_DCB */
15357 	struct i40e_pf *pf;
15358 	struct i40e_hw *hw;
15359 	static u16 pfs_found;
15360 	u16 wol_nvm_bits;
15361 	u16 link_status;
15362 	int err;
15363 	u32 val;
15364 	u32 i;
15365 
15366 	err = pci_enable_device_mem(pdev);
15367 	if (err)
15368 		return err;
15369 
	/* set up for 64-bit DMA addressing */
15371 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
15372 	if (err) {
15373 		dev_err(&pdev->dev,
15374 			"DMA configuration failed: 0x%x\n", err);
15375 		goto err_dma;
15376 	}
15377 
15378 	/* set up pci connections */
15379 	err = pci_request_mem_regions(pdev, i40e_driver_name);
15380 	if (err) {
15381 		dev_info(&pdev->dev,
			 "pci_request_mem_regions failed %d\n", err);
15383 		goto err_pci_reg;
15384 	}
15385 
15386 	pci_enable_pcie_error_reporting(pdev);
15387 	pci_set_master(pdev);
15388 
15389 	/* Now that we have a PCI connection, we need to do the
15390 	 * low level device setup.  This is primarily setting up
15391 	 * the Admin Queue structures and then querying for the
15392 	 * device's current profile information.
15393 	 */
15394 	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
15395 	if (!pf) {
15396 		err = -ENOMEM;
15397 		goto err_pf_alloc;
15398 	}
15399 	pf->next_vsi = 0;
15400 	pf->pdev = pdev;
15401 	set_bit(__I40E_DOWN, pf->state);
15402 
15403 	hw = &pf->hw;
15404 	hw->back = pf;
15405 
15406 	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
15407 				I40E_MAX_CSR_SPACE);
15408 	/* We believe that the highest register to read is
15409 	 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
15410 	 * is not less than that before mapping to prevent a
15411 	 * kernel panic.
15412 	 */
15413 	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15414 		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15415 			pf->ioremap_len);
15416 		err = -ENOMEM;
15417 		goto err_ioremap;
15418 	}
15419 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
15420 	if (!hw->hw_addr) {
15421 		err = -EIO;
15422 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
15423 			 (unsigned int)pci_resource_start(pdev, 0),
15424 			 pf->ioremap_len, err);
15425 		goto err_ioremap;
15426 	}
15427 	hw->vendor_id = pdev->vendor;
15428 	hw->device_id = pdev->device;
15429 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
15430 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
15431 	i40e_set_subsystem_device_id(hw);
15432 	hw->bus.device = PCI_SLOT(pdev->devfn);
15433 	hw->bus.func = PCI_FUNC(pdev->devfn);
15434 	hw->bus.bus_id = pdev->bus->number;
15435 	pf->instance = pfs_found;
15436 
15437 	/* Select something other than the 802.1ad ethertype for the
15438 	 * switch to use internally and drop on ingress.
15439 	 */
15440 	hw->switch_tag = 0xffff;
15441 	hw->first_tag = ETH_P_8021AD;
15442 	hw->second_tag = ETH_P_8021Q;
15443 
15444 	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
15445 	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15446 	INIT_LIST_HEAD(&pf->ddp_old_prof);
15447 
15448 	/* set up the locks for the AQ, do this only once in probe
15449 	 * and destroy them only once in remove
15450 	 */
15451 	mutex_init(&hw->aq.asq_mutex);
15452 	mutex_init(&hw->aq.arq_mutex);
15453 
15454 	pf->msg_enable = netif_msg_init(debug,
15455 					NETIF_MSG_DRV |
15456 					NETIF_MSG_PROBE |
15457 					NETIF_MSG_LINK);
15458 	if (debug < -1)
15459 		pf->hw.debug_mask = debug;
15460 
15461 	/* do a special CORER for clearing PXE mode once at init */
15462 	if (hw->revision_id == 0 &&
15463 	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15464 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15465 		i40e_flush(hw);
15466 		msleep(200);
15467 		pf->corer_count++;
15468 
15469 		i40e_clear_pxe_mode(hw);
15470 	}
15471 
15472 	/* Reset here to make sure all is clean and to define PF 'n' */
15473 	i40e_clear_hw(hw);
15474 
15475 	err = i40e_set_mac_type(hw);
15476 	if (err) {
15477 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15478 			 err);
15479 		goto err_pf_reset;
15480 	}
15481 
15482 	err = i40e_handle_resets(pf);
15483 	if (err)
15484 		goto err_pf_reset;
15485 
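	/* i40e_check_recovery_mode() sets __I40E_RECOVERY_MODE in pf->state
	 * when the FW reports recovery mode; the bit is checked again after
	 * i40e_sw_init() below to take the minimal recovery init path
	 */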
15486 	i40e_check_recovery_mode(pf);
15487 
15488 	if (is_kdump_kernel()) {
15489 		hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
15490 		hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
15491 	} else {
15492 		hw->aq.num_arq_entries = I40E_AQ_LEN;
15493 		hw->aq.num_asq_entries = I40E_AQ_LEN;
15494 	}
15495 	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15496 	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15497 	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
15498 
15499 	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
15500 		 "%s-%s:misc",
15501 		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
15502 
15503 	err = i40e_init_shared_code(hw);
15504 	if (err) {
15505 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15506 			 err);
15507 		goto err_pf_reset;
15508 	}
15509 
15510 	/* set up a default setting for link flow control */
15511 	pf->hw.fc.requested_mode = I40E_FC_NONE;
15512 
15513 	err = i40e_init_adminq(hw);
15514 	if (err) {
15515 		if (err == I40E_ERR_FIRMWARE_API_VERSION)
15516 			dev_info(&pdev->dev,
15517 				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15518 				 hw->aq.api_maj_ver,
15519 				 hw->aq.api_min_ver,
15520 				 I40E_FW_API_VERSION_MAJOR,
15521 				 I40E_FW_MINOR_VERSION(hw));
15522 		else
15523 			dev_info(&pdev->dev,
15524 				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
15525 
15526 		goto err_pf_reset;
15527 	}
15528 	i40e_get_oem_version(hw);
15529 
15530 	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
15531 	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
15532 		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
15533 		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
15534 		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
15535 		 hw->subsystem_vendor_id, hw->subsystem_device_id);
15536 
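	/* warn if the FW API is newer than the driver expects, or note when
	 * it is old enough that an NVM update is recommended
	 */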
15537 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
15538 	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
15539 		dev_dbg(&pdev->dev,
15540 			"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
15541 			 hw->aq.api_maj_ver,
15542 			 hw->aq.api_min_ver,
15543 			 I40E_FW_API_VERSION_MAJOR,
15544 			 I40E_FW_MINOR_VERSION(hw));
15545 	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
15546 		dev_info(&pdev->dev,
15547 			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15548 			 hw->aq.api_maj_ver,
15549 			 hw->aq.api_min_ver,
15550 			 I40E_FW_API_VERSION_MAJOR,
15551 			 I40E_FW_MINOR_VERSION(hw));
15552 
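	/* verify the NVM checksum; a failing check sets __I40E_BAD_EEPROM,
	 * which gates SR-IOV setup further down
	 */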
15553 	i40e_verify_eeprom(pf);
15554 
15555 	/* Rev 0 hardware was never productized */
15556 	if (hw->revision_id < 1)
15557 		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
15558 
15559 	i40e_clear_pxe_mode(hw);
15560 
15561 	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
15562 	if (err)
15563 		goto err_adminq_setup;
15564 
15565 	err = i40e_sw_init(pf);
15566 	if (err) {
15567 		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
15568 		goto err_sw_init;
15569 	}
15570 
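	/* when the FW is stuck in recovery mode, skip the regular bring-up
	 * and only initialize the minimal recovery-mode subsystems
	 */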
15571 	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15572 		return i40e_init_recovery_mode(pf, hw);
15573 
15574 	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
15575 				hw->func_caps.num_rx_qp, 0, 0);
15576 	if (err) {
15577 		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
15578 		goto err_init_lan_hmc;
15579 	}
15580 
15581 	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
15582 	if (err) {
15583 		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
15584 		err = -ENOENT;
15585 		goto err_configure_lan_hmc;
15586 	}
15587 
15588 	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because this call fails if LLDP was
	 * already disabled via the hardware settings
15591 	 */
15592 	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
15593 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
15594 		i40e_aq_stop_lldp(hw, true, false, NULL);
15595 	}
15596 
15597 	/* allow a platform config to override the HW addr */
15598 	i40e_get_platform_mac_addr(pdev, pf);
15599 
15600 	if (!is_valid_ether_addr(hw->mac.addr)) {
15601 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
15602 		err = -EIO;
15603 		goto err_mac_addr;
15604 	}
15605 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
15606 	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
15607 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
15608 	if (is_valid_ether_addr(hw->mac.port_addr))
15609 		pf->hw_features |= I40E_HW_PORT_ID_VALID;
15610 
15611 	i40e_ptp_alloc_pins(pf);
15612 	pci_set_drvdata(pdev, pf);
15613 	pci_save_state(pdev);
15614 
15615 #ifdef CONFIG_I40E_DCB
15616 	status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
	if (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
		pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
	else
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
15621 	dev_info(&pdev->dev,
15622 		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
15623 			"FW LLDP is disabled\n" :
15624 			"FW LLDP is enabled\n");
15625 
15626 	/* Enable FW to write default DCB config on link-up */
15627 	i40e_aq_set_dcb_parameters(hw, true, NULL);
15628 
15629 	err = i40e_init_pf_dcb(pf);
15630 	if (err) {
15631 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15632 		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
15633 		/* Continue without DCB enabled */
15634 	}
15635 #endif /* CONFIG_I40E_DCB */
15636 
15637 	/* set up periodic task facility */
15638 	timer_setup(&pf->service_timer, i40e_service_timer, 0);
15639 	pf->service_timer_period = HZ;
15640 
15641 	INIT_WORK(&pf->service_task, i40e_service_task);
15642 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
15643 
15644 	/* NVM bit on means WoL disabled for the port */
15645 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
15647 		pf->wol_en = false;
15648 	else
15649 		pf->wol_en = true;
15650 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15651 
15652 	/* set up the main switch operations */
15653 	i40e_determine_queue_usage(pf);
15654 	err = i40e_init_interrupt_scheme(pf);
15655 	if (err)
15656 		goto err_switch_setup;
15657 
	/* Reduce Tx and Rx queue pairs for kdump.
	 * When MSI-X is enabled, no more TC queue pairs may be used than
	 * there are MSI-X vectors (pf->num_lan_msix). Thus
15661 	 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
15662 	 */
15663 	if (is_kdump_kernel())
15664 		pf->num_lan_msix = 1;
15665 
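	/* hand the UDP tunnel (VXLAN/GENEVE) port table and callbacks to the
	 * core udp_tunnel_nic infrastructure
	 */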
15666 	pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15667 	pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15668 	pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15669 	pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15670 	pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15671 	pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15672 						    UDP_TUNNEL_TYPE_GENEVE;
15673 
15674 	/* The number of VSIs reported by the FW is the minimum guaranteed
15675 	 * to us; HW supports far more and we share the remaining pool with
15676 	 * the other PFs. We allocate space for more than the guarantee with
15677 	 * the understanding that we might not get them all later.
15678 	 */
15679 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15680 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15681 	else
15682 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15683 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15684 		dev_warn(&pf->pdev->dev,
15685 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15686 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15687 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15688 	}
15689 
15690 	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
15691 	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15692 			  GFP_KERNEL);
15693 	if (!pf->vsi) {
15694 		err = -ENOMEM;
15695 		goto err_switch_setup;
15696 	}
15697 
15698 #ifdef CONFIG_PCI_IOV
15699 	/* prep for VF support */
15700 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15701 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15702 	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15703 		if (pci_num_vf(pdev))
15704 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15705 	}
15706 #endif
15707 	err = i40e_setup_pf_switch(pf, false, false);
15708 	if (err) {
15709 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15710 		goto err_vsis;
15711 	}
15712 	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15713 
15714 	/* if FDIR VSI was set up, start it now */
15715 	for (i = 0; i < pf->num_alloc_vsi; i++) {
15716 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15717 			i40e_vsi_open(pf->vsi[i]);
15718 			break;
15719 		}
15720 	}
15721 
15722 	/* The driver only wants link up/down and module qualification
15723 	 * reports from firmware.  Note the negative logic.
15724 	 */
15725 	err = i40e_aq_set_phy_int_mask(&pf->hw,
15726 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
15727 					 I40E_AQ_EVENT_MEDIA_NA |
15728 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15729 	if (err)
15730 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15731 			 i40e_stat_str(&pf->hw, err),
15732 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15733 
15734 	/* Reconfigure hardware for allowing smaller MSS in the case
15735 	 * of TSO, so that we avoid the MDD being fired and causing
15736 	 * a reset in the case of small MSS+TSO.
15737 	 */
15738 	val = rd32(hw, I40E_REG_MSS);
15739 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15740 		val &= ~I40E_REG_MSS_MIN_MASK;
15741 		val |= I40E_64BYTE_MSS;
15742 		wr32(hw, I40E_REG_MSS, val);
15743 	}
15744 
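	/* some firmware needs link auto-negotiation restarted after init;
	 * give the hardware a brief settle time before requesting it
	 */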
15745 	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15746 		msleep(75);
15747 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15748 		if (err)
15749 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15750 				 i40e_stat_str(&pf->hw, err),
15751 				 i40e_aq_str(&pf->hw,
15752 					     pf->hw.aq.asq_last_status));
15753 	}
15754 	/* The main driver is (mostly) up and happy. We need to set this state
15755 	 * before setting up the misc vector or we get a race and the vector
15756 	 * ends up disabled forever.
15757 	 */
15758 	clear_bit(__I40E_DOWN, pf->state);
15759 
15760 	/* In case of MSIX we are going to setup the misc vector right here
15761 	 * to handle admin queue events etc. In case of legacy and MSI
15762 	 * the misc functionality and queue processing is combined in
15763 	 * the same vector and that gets setup at open.
15764 	 */
15765 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15766 		err = i40e_setup_misc_vector(pf);
15767 		if (err) {
15768 			dev_info(&pdev->dev,
15769 				 "setup of misc vector failed: %d\n", err);
15770 			i40e_cloud_filter_exit(pf);
15771 			i40e_fdir_teardown(pf);
15772 			goto err_vsis;
15773 		}
15774 	}
15775 
15776 #ifdef CONFIG_PCI_IOV
15777 	/* prep for VF support */
15778 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15779 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15780 	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15781 		/* disable link interrupts for VFs */
15782 		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15783 		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15784 		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15785 		i40e_flush(hw);
15786 
15787 		if (pci_num_vf(pdev)) {
15788 			dev_info(&pdev->dev,
15789 				 "Active VFs found, allocating resources.\n");
15790 			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15791 			if (err)
15792 				dev_info(&pdev->dev,
15793 					 "Error %d allocating resources for existing VFs\n",
15794 					 err);
15795 		}
15796 	}
15797 #endif /* CONFIG_PCI_IOV */
15798 
15799 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15800 		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
15801 						      pf->num_iwarp_msix,
15802 						      I40E_IWARP_IRQ_PILE_ID);
15803 		if (pf->iwarp_base_vector < 0) {
15804 			dev_info(&pdev->dev,
15805 				 "failed to get tracking for %d vectors for IWARP err=%d\n",
15806 				 pf->num_iwarp_msix, pf->iwarp_base_vector);
15807 			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
15808 		}
15809 	}
15810 
15811 	i40e_dbg_pf_init(pf);
15812 
15813 	/* tell the firmware that we're starting */
15814 	i40e_send_version(pf);
15815 
15816 	/* since everything's happy, start the service_task timer */
15817 	mod_timer(&pf->service_timer,
15818 		  round_jiffies(jiffies + pf->service_timer_period));
15819 
15820 	/* add this PF to client device list and launch a client service task */
15821 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15822 		err = i40e_lan_add_device(pf);
15823 		if (err)
15824 			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
15825 				 err);
15826 	}
15827 
15828 #define PCI_SPEED_SIZE 8
15829 #define PCI_WIDTH_SIZE 8
15830 	/* Devices on the IOSF bus do not have this information
15831 	 * and will report PCI Gen 1 x 1 by default so don't bother
15832 	 * checking them.
15833 	 */
15834 	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
15835 		char speed[PCI_SPEED_SIZE] = "Unknown";
15836 		char width[PCI_WIDTH_SIZE] = "Unknown";
15837 
15838 		/* Get the negotiated link width and speed from PCI config
15839 		 * space
15840 		 */
15841 		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
15842 					  &link_status);
15843 
15844 		i40e_set_pci_config_data(hw, link_status);
15845 
15846 		switch (hw->bus.speed) {
15847 		case i40e_bus_speed_8000:
15848 			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
15849 		case i40e_bus_speed_5000:
15850 			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
15851 		case i40e_bus_speed_2500:
15852 			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
15853 		default:
15854 			break;
15855 		}
15856 		switch (hw->bus.width) {
15857 		case i40e_bus_width_pcie_x8:
15858 			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
15859 		case i40e_bus_width_pcie_x4:
15860 			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
15861 		case i40e_bus_width_pcie_x2:
15862 			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
15863 		case i40e_bus_width_pcie_x1:
15864 			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
15865 		default:
15866 			break;
15867 		}
15868 
15869 		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
15870 			 speed, width);
15871 
15872 		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
15873 		    hw->bus.speed < i40e_bus_speed_8000) {
15874 			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
15875 			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
15876 		}
15877 	}
15878 
15879 	/* get the requested speeds from the fw */
15880 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
15881 	if (err)
15882 		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
15883 			i40e_stat_str(&pf->hw, err),
15884 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15885 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
15886 
15887 	/* set the FEC config due to the board capabilities */
15888 	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15889 
15890 	/* get the supported phy types from the fw */
15891 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
15892 	if (err)
15893 		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
15894 			i40e_stat_str(&pf->hw, err),
15895 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15896 
15897 	/* make sure the MFS hasn't been set lower than the default */
15898 #define MAX_FRAME_SIZE_DEFAULT 0x2600
15899 	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
15900 	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
15901 	if (val < MAX_FRAME_SIZE_DEFAULT)
15902 		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
			 pf->hw.port, val);
15904 
	/* Add a filter to drop Flow control frames from any VSI so they are
	 * never transmitted. By doing so we stop a malicious VF from sending out
15907 	 * PAUSE or PFC frames and potentially controlling traffic for other
15908 	 * PF/VF VSIs.
15909 	 * The FW can still send Flow control frames if enabled.
15910 	 */
15911 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
15912 						       pf->main_vsi_seid);
15913 
15914 	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
15915 		(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
15916 		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
15917 	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
15918 		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
15919 	/* print a string summarizing features */
15920 	i40e_print_features(pf);
15921 
15922 	return 0;
15923 
15924 	/* Unwind what we've done if something failed in the setup */
15925 err_vsis:
15926 	set_bit(__I40E_DOWN, pf->state);
15927 	i40e_clear_interrupt_scheme(pf);
15928 	kfree(pf->vsi);
15929 err_switch_setup:
15930 	i40e_reset_interrupt_capability(pf);
15931 	del_timer_sync(&pf->service_timer);
15932 err_mac_addr:
15933 err_configure_lan_hmc:
15934 	(void)i40e_shutdown_lan_hmc(hw);
15935 err_init_lan_hmc:
15936 	kfree(pf->qp_pile);
15937 err_sw_init:
15938 err_adminq_setup:
15939 err_pf_reset:
15940 	iounmap(hw->hw_addr);
15941 err_ioremap:
15942 	kfree(pf);
15943 err_pf_alloc:
15944 	pci_disable_pcie_error_reporting(pdev);
15945 	pci_release_mem_regions(pdev);
15946 err_pci_reg:
15947 err_dma:
15948 	pci_disable_device(pdev);
15949 	return err;
15950 }
15951 
15952 /**
15953  * i40e_remove - Device removal routine
15954  * @pdev: PCI device information struct
15955  *
15956  * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
15958  * Hot-Plug event, or because the driver is going to be removed from
15959  * memory.
15960  **/
15961 static void i40e_remove(struct pci_dev *pdev)
15962 {
15963 	struct i40e_pf *pf = pci_get_drvdata(pdev);
15964 	struct i40e_hw *hw = &pf->hw;
15965 	i40e_status ret_code;
15966 	int i;
15967 
15968 	i40e_dbg_pf_exit(pf);
15969 
15970 	i40e_ptp_stop(pf);
15971 
15972 	/* Disable RSS in hw */
15973 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
15974 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
15975 
15976 	/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
	 * flags; once they are set, i40e_rebuild should not be called as
15978 	 * i40e_prep_for_reset always returns early.
15979 	 */
15980 	while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
15981 		usleep_range(1000, 2000);
15982 	set_bit(__I40E_IN_REMOVE, pf->state);
15983 
15984 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
15985 		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
15986 		i40e_free_vfs(pf);
15987 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
15988 	}
15989 	/* no more scheduling of any task */
15990 	set_bit(__I40E_SUSPENDED, pf->state);
15991 	set_bit(__I40E_DOWN, pf->state);
15992 	if (pf->service_timer.function)
15993 		del_timer_sync(&pf->service_timer);
15994 	if (pf->service_task.func)
15995 		cancel_work_sync(&pf->service_task);
15996 
15997 	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
15998 		struct i40e_vsi *vsi = pf->vsi[0];
15999 
		/* We know that only one VSI has been allocated for this PF;
		 * it exists solely to register a netdevice, so the interface
		 * is visible in the 'ifconfig' output
16003 		 */
16004 		unregister_netdev(vsi->netdev);
16005 		free_netdev(vsi->netdev);
16006 
16007 		goto unmap;
16008 	}
16009 
16010 	/* Client close must be called explicitly here because the timer
16011 	 * has been stopped.
16012 	 */
16013 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16014 
16015 	i40e_fdir_teardown(pf);
16016 
16017 	/* If there is a switch structure or any orphans, remove them.
16018 	 * This will leave only the PF's VSI remaining.
16019 	 */
16020 	for (i = 0; i < I40E_MAX_VEB; i++) {
16021 		if (!pf->veb[i])
16022 			continue;
16023 
16024 		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
16025 		    pf->veb[i]->uplink_seid == 0)
16026 			i40e_switch_branch_release(pf->veb[i]);
16027 	}
16028 
16029 	/* Now we can shutdown the PF's VSI, just before we kill
16030 	 * adminq and hmc.
16031 	 */
16032 	if (pf->vsi[pf->lan_vsi])
16033 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
16034 
16035 	i40e_cloud_filter_exit(pf);
16036 
16037 	/* remove attached clients */
16038 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16039 		ret_code = i40e_lan_del_device(pf);
16040 		if (ret_code)
16041 			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
16042 				 ret_code);
16043 	}
16044 
16045 	/* shutdown and destroy the HMC */
16046 	if (hw->hmc.hmc_obj) {
16047 		ret_code = i40e_shutdown_lan_hmc(hw);
16048 		if (ret_code)
16049 			dev_warn(&pdev->dev,
16050 				 "Failed to destroy the HMC resources: %d\n",
16051 				 ret_code);
16052 	}
16053 
16054 unmap:
16055 	/* Free MSI/legacy interrupt 0 when in recovery mode. */
16056 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16057 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16058 		free_irq(pf->pdev->irq, pf);
16059 
16060 	/* shutdown the adminq */
16061 	i40e_shutdown_adminq(hw);
16062 
16063 	/* destroy the locks only once, here */
16064 	mutex_destroy(&hw->aq.arq_mutex);
16065 	mutex_destroy(&hw->aq.asq_mutex);
16066 
16067 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
16068 	rtnl_lock();
16069 	i40e_clear_interrupt_scheme(pf);
16070 	for (i = 0; i < pf->num_alloc_vsi; i++) {
16071 		if (pf->vsi[i]) {
16072 			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
16073 				i40e_vsi_clear_rings(pf->vsi[i]);
16074 			i40e_vsi_clear(pf->vsi[i]);
16075 			pf->vsi[i] = NULL;
16076 		}
16077 	}
16078 	rtnl_unlock();
16079 
16080 	for (i = 0; i < I40E_MAX_VEB; i++) {
16081 		kfree(pf->veb[i]);
16082 		pf->veb[i] = NULL;
16083 	}
16084 
16085 	kfree(pf->qp_pile);
16086 	kfree(pf->vsi);
16087 
16088 	iounmap(hw->hw_addr);
16089 	kfree(pf);
16090 	pci_release_mem_regions(pdev);
16091 
16092 	pci_disable_pcie_error_reporting(pdev);
16093 	pci_disable_device(pdev);
16094 }
16095 
16096 /**
16097  * i40e_pci_error_detected - warning that something funky happened in PCI land
16098  * @pdev: PCI device information struct
16099  * @error: the type of PCI error
16100  *
 * Called to warn that something happened and that error handling steps
 * are in progress.  Allows the driver to quiesce things and be ready
 * for remediation.
16104  **/
16105 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
16106 						pci_channel_state_t error)
16107 {
16108 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16109 
16110 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
16111 
16112 	if (!pf) {
16113 		dev_info(&pdev->dev,
16114 			 "Cannot recover - error happened during device probe\n");
16115 		return PCI_ERS_RESULT_DISCONNECT;
16116 	}
16117 
16118 	/* shutdown all operations */
16119 	if (!test_bit(__I40E_SUSPENDED, pf->state))
16120 		i40e_prep_for_reset(pf);
16121 
16122 	/* Request a slot reset */
16123 	return PCI_ERS_RESULT_NEED_RESET;
16124 }
16125 
16126 /**
16127  * i40e_pci_error_slot_reset - a PCI slot reset just happened
16128  * @pdev: PCI device information struct
16129  *
16130  * Called to find if the driver can work with the device now that
16131  * the pci slot has been reset.  If a basic connection seems good
16132  * (registers are readable and have sane content) then return a
16133  * happy little PCI_ERS_RESULT_xxx.
16134  **/
16135 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
16136 {
16137 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16138 	pci_ers_result_t result;
16139 	u32 reg;
16140 
16141 	dev_dbg(&pdev->dev, "%s\n", __func__);
16142 	if (pci_enable_device_mem(pdev)) {
16143 		dev_info(&pdev->dev,
16144 			 "Cannot re-enable PCI device after reset.\n");
16145 		result = PCI_ERS_RESULT_DISCONNECT;
16146 	} else {
16147 		pci_set_master(pdev);
16148 		pci_restore_state(pdev);
16149 		pci_save_state(pdev);
16150 		pci_wake_from_d3(pdev, false);
16151 
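		/* a cleared global reset trigger register means no reset is
		 * still pending, i.e. the device came back cleanly
		 */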
16152 		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
16153 		if (reg == 0)
16154 			result = PCI_ERS_RESULT_RECOVERED;
16155 		else
16156 			result = PCI_ERS_RESULT_DISCONNECT;
16157 	}
16158 
16159 	return result;
16160 }
16161 
16162 /**
16163  * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16164  * @pdev: PCI device information struct
16165  */
16166 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
16167 {
16168 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16169 
16170 	i40e_prep_for_reset(pf);
16171 }
16172 
16173 /**
16174  * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16175  * @pdev: PCI device information struct
16176  */
16177 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
16178 {
16179 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16180 
16181 	if (test_bit(__I40E_IN_REMOVE, pf->state))
16182 		return;
16183 
16184 	i40e_reset_and_rebuild(pf, false, false);
16185 }
16186 
16187 /**
16188  * i40e_pci_error_resume - restart operations after PCI error recovery
16189  * @pdev: PCI device information struct
16190  *
16191  * Called to allow the driver to bring things back up after PCI error
16192  * and/or reset recovery has finished.
16193  **/
16194 static void i40e_pci_error_resume(struct pci_dev *pdev)
16195 {
16196 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16197 
16198 	dev_dbg(&pdev->dev, "%s\n", __func__);
16199 	if (test_bit(__I40E_SUSPENDED, pf->state))
16200 		return;
16201 
16202 	i40e_handle_reset_warning(pf, false);
16203 }
16204 
16205 /**
16206  * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16207  * using the mac_address_write admin q function
16208  * @pf: pointer to i40e_pf struct
16209  **/
16210 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
16211 {
16212 	struct i40e_hw *hw = &pf->hw;
16213 	i40e_status ret;
	u8 mac_addr[ETH_ALEN];
16215 	u16 flags = 0;
16216 
16217 	/* Get current MAC address in case it's an LAA */
16218 	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
16219 		ether_addr_copy(mac_addr,
16220 				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
16221 	} else {
16222 		dev_err(&pf->pdev->dev,
16223 			"Failed to retrieve MAC address; using default\n");
16224 		ether_addr_copy(mac_addr, hw->mac.addr);
16225 	}
16226 
16227 	/* The FW expects the mac address write cmd to first be called with
16228 	 * one of these flags before calling it again with the multicast
16229 	 * enable flags.
16230 	 */
16231 	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
16232 
16233 	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
16234 		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
16235 
16236 	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16237 	if (ret) {
16238 		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up\n");
16240 		return;
16241 	}
16242 
16243 	flags = I40E_AQC_MC_MAG_EN
16244 			| I40E_AQC_WOL_PRESERVE_ON_PFR
16245 			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
16246 	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16247 	if (ret)
16248 		dev_err(&pf->pdev->dev,
16249 			"Failed to enable Multicast Magic Packet wake up\n");
16250 }
16251 
16252 /**
16253  * i40e_shutdown - PCI callback for shutting down
16254  * @pdev: PCI device information struct
16255  **/
16256 static void i40e_shutdown(struct pci_dev *pdev)
16257 {
16258 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16259 	struct i40e_hw *hw = &pf->hw;
16260 
16261 	set_bit(__I40E_SUSPENDED, pf->state);
16262 	set_bit(__I40E_DOWN, pf->state);
16263 
16264 	del_timer_sync(&pf->service_timer);
16265 	cancel_work_sync(&pf->service_task);
16266 	i40e_cloud_filter_exit(pf);
16267 	i40e_fdir_teardown(pf);
16268 
16269 	/* Client close must be called explicitly here because the timer
16270 	 * has been stopped.
16271 	 */
16272 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16273 
16274 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16275 		i40e_enable_mc_magic_wake(pf);
16276 
16277 	i40e_prep_for_reset(pf);
16278 
16279 	wr32(hw, I40E_PFPM_APM,
16280 	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16281 	wr32(hw, I40E_PFPM_WUFC,
16282 	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16283 
16284 	/* Free MSI/legacy interrupt 0 when in recovery mode. */
16285 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16286 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16287 		free_irq(pf->pdev->irq, pf);
16288 
16289 	/* Since we're going to destroy queues during the
16290 	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16291 	 * whole section
16292 	 */
16293 	rtnl_lock();
16294 	i40e_clear_interrupt_scheme(pf);
16295 	rtnl_unlock();
16296 
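	/* arm wake-from-D3 and drop to D3hot only when the system is really
	 * powering off
	 */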
16297 	if (system_state == SYSTEM_POWER_OFF) {
16298 		pci_wake_from_d3(pdev, pf->wol_en);
16299 		pci_set_power_state(pdev, PCI_D3hot);
16300 	}
16301 }
16302 
16303 /**
16304  * i40e_suspend - PM callback for moving to D3
16305  * @dev: generic device information structure
16306  **/
16307 static int __maybe_unused i40e_suspend(struct device *dev)
16308 {
16309 	struct i40e_pf *pf = dev_get_drvdata(dev);
16310 	struct i40e_hw *hw = &pf->hw;
16311 
16312 	/* If we're already suspended, then there is nothing to do */
16313 	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
16314 		return 0;
16315 
16316 	set_bit(__I40E_DOWN, pf->state);
16317 
16318 	/* Ensure service task will not be running */
16319 	del_timer_sync(&pf->service_timer);
16320 	cancel_work_sync(&pf->service_task);
16321 
16322 	/* Client close must be called explicitly here because the timer
16323 	 * has been stopped.
16324 	 */
16325 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16326 
16327 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16328 		i40e_enable_mc_magic_wake(pf);
16329 
16330 	/* Since we're going to destroy queues during the
16331 	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16332 	 * whole section
16333 	 */
16334 	rtnl_lock();
16335 
16336 	i40e_prep_for_reset(pf);
16337 
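	/* program the wake-up registers so the configured WoL method stays
	 * armed while in D3
	 */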
16338 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16339 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16340 
16341 	/* Clear the interrupt scheme and release our IRQs so that the system
16342 	 * can safely hibernate even when there are a large number of CPUs.
16343 	 * Otherwise hibernation might fail when mapping all the vectors back
16344 	 * to CPU0.
16345 	 */
16346 	i40e_clear_interrupt_scheme(pf);
16347 
16348 	rtnl_unlock();
16349 
16350 	return 0;
16351 }
16352 
16353 /**
16354  * i40e_resume - PM callback for waking up from D3
16355  * @dev: generic device information structure
16356  **/
16357 static int __maybe_unused i40e_resume(struct device *dev)
16358 {
16359 	struct i40e_pf *pf = dev_get_drvdata(dev);
16360 	int err;
16361 
16362 	/* If we're not suspended, then there is nothing to do */
16363 	if (!test_bit(__I40E_SUSPENDED, pf->state))
16364 		return 0;
16365 
16366 	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
16367 	 * since we're going to be restoring queues
16368 	 */
16369 	rtnl_lock();
16370 
16371 	/* We cleared the interrupt scheme when we suspended, so we need to
16372 	 * restore it now to resume device functionality.
16373 	 */
16374 	err = i40e_restore_interrupt_scheme(pf);
16375 	if (err) {
16376 		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
16377 			err);
16378 	}
16379 
16380 	clear_bit(__I40E_DOWN, pf->state);
16381 	i40e_reset_and_rebuild(pf, false, true);
16382 
16383 	rtnl_unlock();
16384 
16385 	/* Clear suspended state last after everything is recovered */
16386 	clear_bit(__I40E_SUSPENDED, pf->state);
16387 
16388 	/* Restart the service task */
16389 	mod_timer(&pf->service_timer,
16390 		  round_jiffies(jiffies + pf->service_timer_period));
16391 
16392 	return 0;
16393 }
16394 
16395 static const struct pci_error_handlers i40e_err_handler = {
16396 	.error_detected = i40e_pci_error_detected,
16397 	.slot_reset = i40e_pci_error_slot_reset,
16398 	.reset_prepare = i40e_pci_error_reset_prepare,
16399 	.reset_done = i40e_pci_error_reset_done,
16400 	.resume = i40e_pci_error_resume,
16401 };
16402 
16403 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
16404 
16405 static struct pci_driver i40e_driver = {
16406 	.name     = i40e_driver_name,
16407 	.id_table = i40e_pci_tbl,
16408 	.probe    = i40e_probe,
16409 	.remove   = i40e_remove,
16410 	.driver   = {
16411 		.pm = &i40e_pm_ops,
16412 	},
16413 	.shutdown = i40e_shutdown,
16414 	.err_handler = &i40e_err_handler,
16415 	.sriov_configure = i40e_pci_sriov_configure,
16416 };
16417 
16418 /**
16419  * i40e_init_module - Driver registration routine
16420  *
 * i40e_init_module is the first routine called when the driver is
 * loaded. It creates the driver workqueue, sets up debugfs, and
 * registers with the PCI subsystem.
16423  **/
16424 static int __init i40e_init_module(void)
16425 {
16426 	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
16427 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
16428 
16429 	/* There is no need to throttle the number of active tasks because
16430 	 * each device limits its own task using a state bit for scheduling
16431 	 * the service task, and the device tasks do not interfere with each
16432 	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
16433 	 * since we need to be able to guarantee forward progress even under
16434 	 * memory pressure.
16435 	 */
16436 	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
16437 	if (!i40e_wq) {
16438 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
16439 		return -ENOMEM;
16440 	}
16441 
16442 	i40e_dbg_init();
16443 	return pci_register_driver(&i40e_driver);
16444 }
16445 module_init(i40e_init_module);
16446 
16447 /**
16448  * i40e_exit_module - Driver exit cleanup routine
16449  *
16450  * i40e_exit_module is called just before the driver is removed
16451  * from memory.
16452  **/
16453 static void __exit i40e_exit_module(void)
16454 {
16455 	pci_unregister_driver(&i40e_driver);
16456 	destroy_workqueue(i40e_wq);
16457 	ida_destroy(&i40e_client_ida);
16458 	i40e_dbg_exit();
16459 }
16460 module_exit(i40e_exit_module);
16461