1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/of_net.h>
6 #include <linux/pci.h>
7 #include <linux/bpf.h>
8 #include <generated/utsrelease.h>
9 #include <linux/crash_dump.h>
10 
11 /* Local includes */
12 #include "i40e.h"
13 #include "i40e_diag.h"
14 #include "i40e_xsk.h"
15 #include <net/udp_tunnel.h>
16 #include <net/xdp_sock_drv.h>
17 /* All i40e tracepoints are defined by the include below, which
18  * must be included exactly once across the whole kernel with
19  * CREATE_TRACE_POINTS defined
20  */
21 #define CREATE_TRACE_POINTS
22 #include "i40e_trace.h"
23 
24 const char i40e_driver_name[] = "i40e";
25 static const char i40e_driver_string[] =
26 			"Intel(R) Ethernet Connection XL710 Network Driver";
27 
28 static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
29 
30 /* a few forward declarations */
31 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
32 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
33 static int i40e_add_vsi(struct i40e_vsi *vsi);
34 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
35 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
36 static int i40e_setup_misc_vector(struct i40e_pf *pf);
37 static void i40e_determine_queue_usage(struct i40e_pf *pf);
38 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
39 static void i40e_prep_for_reset(struct i40e_pf *pf);
40 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
41 				   bool lock_acquired);
42 static int i40e_reset(struct i40e_pf *pf);
43 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
44 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
45 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
46 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
47 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
48 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
49 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
50 static int i40e_get_capabilities(struct i40e_pf *pf,
51 				 enum i40e_admin_queue_opc list_type);
52 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
53 
54 /* i40e_pci_tbl - PCI Device ID Table
55  *
56  * Last entry must be all 0s
57  *
58  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
59  *   Class, Class Mask, private data (not used) }
60  */
61 static const struct pci_device_id i40e_pci_tbl[] = {
62 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
63 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
64 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
65 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
66 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
67 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
68 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
69 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
70 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
71 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
72 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
73 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
74 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
75 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
76 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
77 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
78 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
79 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
80 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
81 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
82 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
83 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
84 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
85 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
86 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
87 	/* required last entry */
88 	{0, }
89 };
90 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
91 
92 #define I40E_MAX_VF_COUNT 128
93 static int debug = -1;
94 module_param(debug, uint, 0);
95 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
96 
97 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
98 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
99 MODULE_LICENSE("GPL v2");
100 
101 static struct workqueue_struct *i40e_wq;
102 
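/**
 * netdev_hw_addr_refcnt - adjust refcount of a netdev multicast address
 * @f: the MAC filter whose address is being referenced
 * @netdev: network interface device structure
 * @delta: value to add to the matching address's refcount
 *
 * Walks the netdev multicast list, finds the entry matching @f->macaddr and
 * adds @delta to its refcount, never letting the count drop below one.
 **/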
103 static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
104 				  struct net_device *netdev, int delta)
105 {
106 	struct netdev_hw_addr *ha;
107 
108 	if (!f || !netdev)
109 		return;
110 
111 	netdev_for_each_mc_addr(ha, netdev) {
112 		if (ether_addr_equal(ha->addr, f->macaddr)) {
113 			ha->refcount += delta;
114 			if (ha->refcount <= 0)
115 				ha->refcount = 1;
116 			break;
117 		}
118 	}
119 }
120 
121 /**
122  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
123  * @hw:   pointer to the HW structure
124  * @mem:  ptr to mem struct to fill out
125  * @size: size of memory requested
126  * @alignment: what to align the allocation to
127  **/
128 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
129 			    u64 size, u32 alignment)
130 {
131 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
132 
133 	mem->size = ALIGN(size, alignment);
134 	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
135 				     GFP_KERNEL);
136 	if (!mem->va)
137 		return -ENOMEM;
138 
139 	return 0;
140 }
141 
142 /**
143  * i40e_free_dma_mem_d - OS specific memory free for shared code
144  * @hw:   pointer to the HW structure
145  * @mem:  ptr to mem struct to free
146  **/
147 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
148 {
149 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
150 
151 	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
152 	mem->va = NULL;
153 	mem->pa = 0;
154 	mem->size = 0;
155 
156 	return 0;
157 }
158 
159 /**
160  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
161  * @hw:   pointer to the HW structure
162  * @mem:  ptr to mem struct to fill out
163  * @size: size of memory requested
164  **/
165 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
166 			     u32 size)
167 {
168 	mem->size = size;
169 	mem->va = kzalloc(size, GFP_KERNEL);
170 
171 	if (!mem->va)
172 		return -ENOMEM;
173 
174 	return 0;
175 }
176 
177 /**
178  * i40e_free_virt_mem_d - OS specific memory free for shared code
179  * @hw:   pointer to the HW structure
180  * @mem:  ptr to mem struct to free
181  **/
182 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
183 {
184 	/* it's ok to kfree a NULL pointer */
185 	kfree(mem->va);
186 	mem->va = NULL;
187 	mem->size = 0;
188 
189 	return 0;
190 }
191 
192 /**
193  * i40e_get_lump - find a lump of free generic resource
194  * @pf: board private structure
195  * @pile: the pile of resource to search
196  * @needed: the number of items needed
197  * @id: an owner id to stick on the items assigned
198  *
199  * Returns the base item index of the lump, or negative for error
200  **/
201 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
202 			 u16 needed, u16 id)
203 {
204 	int ret = -ENOMEM;
205 	int i, j;
206 
207 	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
208 		dev_info(&pf->pdev->dev,
209 			 "param err: pile=%s needed=%d id=0x%04x\n",
210 			 pile ? "<valid>" : "<null>", needed, id);
211 		return -EINVAL;
212 	}
213 
214 	/* Allocate last queue in the pile for FDIR VSI queue
215 	 * so it doesn't fragment the qp_pile
216 	 */
217 	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
218 		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
219 			dev_err(&pf->pdev->dev,
220 				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
221 				pile->num_entries - 1);
222 			return -ENOMEM;
223 		}
224 		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
225 		return pile->num_entries - 1;
226 	}
227 
228 	i = 0;
229 	while (i < pile->num_entries) {
230 		/* skip already allocated entries */
231 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
232 			i++;
233 			continue;
234 		}
235 
236 		/* do we have enough in this lump? */
237 		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
238 			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
239 				break;
240 		}
241 
242 		if (j == needed) {
243 			/* there was enough, so assign it to the requestor */
244 			for (j = 0; j < needed; j++)
245 				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
246 			ret = i;
247 			break;
248 		}
249 
250 		/* not enough, so skip over it and continue looking */
251 		i += j;
252 	}
253 
254 	return ret;
255 }
256 
257 /**
258  * i40e_put_lump - return a lump of generic resource
259  * @pile: the pile of resource to search
260  * @index: the base item index
261  * @id: the owner id of the items assigned
262  *
263  * Returns the count of items in the lump, or -EINVAL for an invalid pile or index
264  **/
265 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
266 {
267 	int valid_id = (id | I40E_PILE_VALID_BIT);
268 	int count = 0;
269 	u16 i;
270 
271 	if (!pile || index >= pile->num_entries)
272 		return -EINVAL;
273 
274 	for (i = index;
275 	     i < pile->num_entries && pile->list[i] == valid_id;
276 	     i++) {
277 		pile->list[i] = 0;
278 		count++;
279 	}
280
282 	return count;
283 }
284 
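/* A lump reserved with i40e_get_lump() is released with i40e_put_lump()
 * using the same base index and owner id; for example, the queue pairs a
 * VSI takes from pf->qp_pile are returned when that VSI is freed.
 */
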
285 /**
286  * i40e_find_vsi_from_id - searches for the vsi with the given id
287  * @pf: the pf structure to search for the vsi
288  * @id: id of the vsi it is searching for
289  **/
290 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
291 {
292 	int i;
293 
294 	for (i = 0; i < pf->num_alloc_vsi; i++)
295 		if (pf->vsi[i] && (pf->vsi[i]->id == id))
296 			return pf->vsi[i];
297 
298 	return NULL;
299 }
300 
301 /**
302  * i40e_service_event_schedule - Schedule the service task to wake up
303  * @pf: board private structure
304  *
305  * If not already scheduled, this puts the task into the work queue
306  **/
307 void i40e_service_event_schedule(struct i40e_pf *pf)
308 {
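	/* Do not queue the service task while the device is down or a reset
	 * is pending, except when running in recovery mode, which still
	 * relies on the task.
	 */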
309 	if ((!test_bit(__I40E_DOWN, pf->state) &&
310 	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
311 	      test_bit(__I40E_RECOVERY_MODE, pf->state))
312 		queue_work(i40e_wq, &pf->service_task);
313 }
314 
315 /**
316  * i40e_tx_timeout - Respond to a Tx Hang
317  * @netdev: network interface device structure
318  * @txqueue: queue number timing out
319  *
320  * If any port has noticed a Tx timeout, it is likely that the whole
321  * device is munged, not just the one netdev port, so go for the full
322  * reset.
323  **/
324 static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
325 {
326 	struct i40e_netdev_priv *np = netdev_priv(netdev);
327 	struct i40e_vsi *vsi = np->vsi;
328 	struct i40e_pf *pf = vsi->back;
329 	struct i40e_ring *tx_ring = NULL;
330 	unsigned int i;
331 	u32 head, val;
332 
333 	pf->tx_timeout_count++;
334 
335 	/* with txqueue index, find the tx_ring struct */
336 	for (i = 0; i < vsi->num_queue_pairs; i++) {
337 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
338 			if (txqueue ==
339 			    vsi->tx_rings[i]->queue_index) {
340 				tx_ring = vsi->tx_rings[i];
341 				break;
342 			}
343 		}
344 	}
345 
346 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
347 		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
348 	else if (time_before(jiffies,
349 		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
350 		return;   /* don't do any new action before the next timeout */
351 
352 	/* don't kick off another recovery if one is already pending */
353 	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
354 		return;
355 
356 	if (tx_ring) {
357 		head = i40e_get_head(tx_ring);
358 		/* Read interrupt register */
359 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
360 			val = rd32(&pf->hw,
361 			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
362 						tx_ring->vsi->base_vector - 1));
363 		else
364 			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
365 
366 		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
367 			    vsi->seid, txqueue, tx_ring->next_to_clean,
368 			    head, tx_ring->next_to_use,
369 			    readl(tx_ring->tail), val);
370 	}
371 
372 	pf->tx_timeout_last_recovery = jiffies;
373 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
374 		    pf->tx_timeout_recovery_level, txqueue);
375 
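	/* Escalate through progressively wider resets on successive timeouts:
	 * PF reset first, then core reset, then global reset.
	 */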
376 	switch (pf->tx_timeout_recovery_level) {
377 	case 1:
378 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
379 		break;
380 	case 2:
381 		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
382 		break;
383 	case 3:
384 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
385 		break;
386 	default:
387 		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
388 		break;
389 	}
390 
391 	i40e_service_event_schedule(pf);
392 	pf->tx_timeout_recovery_level++;
393 }
394 
395 /**
396  * i40e_get_vsi_stats_struct - Get System Network Statistics
397  * @vsi: the VSI we care about
398  *
399  * Returns the address of the device statistics structure.
400  * The statistics are actually updated from the service task.
401  **/
402 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
403 {
404 	return &vsi->net_stats;
405 }
406 
407 /**
408  * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
409  * @ring: Tx ring to get statistics from
410  * @stats: statistics entry to be updated
411  **/
412 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
413 					    struct rtnl_link_stats64 *stats)
414 {
415 	u64 bytes, packets;
416 	unsigned int start;
417 
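	/* read packets/bytes consistently using the ring's u64_stats seqcount */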
418 	do {
419 		start = u64_stats_fetch_begin_irq(&ring->syncp);
420 		packets = ring->stats.packets;
421 		bytes   = ring->stats.bytes;
422 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
423 
424 	stats->tx_packets += packets;
425 	stats->tx_bytes   += bytes;
426 }
427 
428 /**
429  * i40e_get_netdev_stats_struct - Get statistics for netdev interface
430  * @netdev: network interface device structure
431  * @stats: data structure to store statistics
432  *
433  * Fills @stats with the per-ring Tx/Rx counters gathered here; the other
434  * fields are copied from the VSI statistics updated by the service task.
435  **/
436 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
437 				  struct rtnl_link_stats64 *stats)
438 {
439 	struct i40e_netdev_priv *np = netdev_priv(netdev);
440 	struct i40e_vsi *vsi = np->vsi;
441 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
442 	struct i40e_ring *ring;
443 	int i;
444 
445 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
446 		return;
447 
448 	if (!vsi->tx_rings)
449 		return;
450 
451 	rcu_read_lock();
452 	for (i = 0; i < vsi->num_queue_pairs; i++) {
453 		u64 bytes, packets;
454 		unsigned int start;
455 
456 		ring = READ_ONCE(vsi->tx_rings[i]);
457 		if (!ring)
458 			continue;
459 		i40e_get_netdev_stats_struct_tx(ring, stats);
460 
461 		if (i40e_enabled_xdp_vsi(vsi)) {
462 			ring = READ_ONCE(vsi->xdp_rings[i]);
463 			if (!ring)
464 				continue;
465 			i40e_get_netdev_stats_struct_tx(ring, stats);
466 		}
467 
468 		ring = READ_ONCE(vsi->rx_rings[i]);
469 		if (!ring)
470 			continue;
471 		do {
472 			start   = u64_stats_fetch_begin_irq(&ring->syncp);
473 			packets = ring->stats.packets;
474 			bytes   = ring->stats.bytes;
475 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
476 
477 		stats->rx_packets += packets;
478 		stats->rx_bytes   += bytes;
479 
480 	}
481 	rcu_read_unlock();
482 
483 	/* following stats updated by i40e_watchdog_subtask() */
484 	stats->multicast	= vsi_stats->multicast;
485 	stats->tx_errors	= vsi_stats->tx_errors;
486 	stats->tx_dropped	= vsi_stats->tx_dropped;
487 	stats->rx_errors	= vsi_stats->rx_errors;
488 	stats->rx_dropped	= vsi_stats->rx_dropped;
489 	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
490 	stats->rx_length_errors	= vsi_stats->rx_length_errors;
491 }
492 
493 /**
494  * i40e_vsi_reset_stats - Resets all stats of the given vsi
495  * @vsi: the VSI to have its stats reset
496  **/
497 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
498 {
499 	struct rtnl_link_stats64 *ns;
500 	int i;
501 
502 	if (!vsi)
503 		return;
504 
505 	ns = i40e_get_vsi_stats_struct(vsi);
506 	memset(ns, 0, sizeof(*ns));
507 	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
508 	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
509 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
510 	if (vsi->rx_rings && vsi->rx_rings[0]) {
511 		for (i = 0; i < vsi->num_queue_pairs; i++) {
512 			memset(&vsi->rx_rings[i]->stats, 0,
513 			       sizeof(vsi->rx_rings[i]->stats));
514 			memset(&vsi->rx_rings[i]->rx_stats, 0,
515 			       sizeof(vsi->rx_rings[i]->rx_stats));
516 			memset(&vsi->tx_rings[i]->stats, 0,
517 			       sizeof(vsi->tx_rings[i]->stats));
518 			memset(&vsi->tx_rings[i]->tx_stats, 0,
519 			       sizeof(vsi->tx_rings[i]->tx_stats));
520 		}
521 	}
522 	vsi->stat_offsets_loaded = false;
523 }
524 
525 /**
526  * i40e_pf_reset_stats - Reset all of the stats for the given PF
527  * @pf: the PF to be reset
528  **/
529 void i40e_pf_reset_stats(struct i40e_pf *pf)
530 {
531 	int i;
532 
533 	memset(&pf->stats, 0, sizeof(pf->stats));
534 	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
535 	pf->stat_offsets_loaded = false;
536 
537 	for (i = 0; i < I40E_MAX_VEB; i++) {
538 		if (pf->veb[i]) {
539 			memset(&pf->veb[i]->stats, 0,
540 			       sizeof(pf->veb[i]->stats));
541 			memset(&pf->veb[i]->stats_offsets, 0,
542 			       sizeof(pf->veb[i]->stats_offsets));
543 			memset(&pf->veb[i]->tc_stats, 0,
544 			       sizeof(pf->veb[i]->tc_stats));
545 			memset(&pf->veb[i]->tc_stats_offsets, 0,
546 			       sizeof(pf->veb[i]->tc_stats_offsets));
547 			pf->veb[i]->stat_offsets_loaded = false;
548 		}
549 	}
550 	pf->hw_csum_rx_error = 0;
551 }
552 
553 /**
554  * i40e_stat_update48 - read and update a 48 bit stat from the chip
555  * @hw: ptr to the hardware info
556  * @hireg: the high 32 bit reg to read
557  * @loreg: the low 32 bit reg to read
558  * @offset_loaded: has the initial offset been loaded yet
559  * @offset: ptr to current offset value
560  * @stat: ptr to the stat
561  *
562  * Since the device stats are not reset at PFReset, they likely will not
563  * be zeroed when the driver starts.  We'll save the first values read
564  * and use them as offsets to be subtracted from the raw values in order
565  * to report stats that count from zero.  In the process, we also manage
566  * the potential roll-over.
567  **/
568 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
569 			       bool offset_loaded, u64 *offset, u64 *stat)
570 {
571 	u64 new_data;
572 
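	/* for the emulated QEMU device, build the counter from its two 32-bit
	 * halves rather than issuing a single 64-bit read
	 */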
573 	if (hw->device_id == I40E_DEV_ID_QEMU) {
574 		new_data = rd32(hw, loreg);
575 		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
576 	} else {
577 		new_data = rd64(hw, loreg);
578 	}
579 	if (!offset_loaded)
580 		*offset = new_data;
581 	if (likely(new_data >= *offset))
582 		*stat = new_data - *offset;
583 	else
584 		*stat = (new_data + BIT_ULL(48)) - *offset;
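	/* the hardware counters are 48 bits wide; mask the result back down */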
585 	*stat &= 0xFFFFFFFFFFFFULL;
586 }
587 
588 /**
589  * i40e_stat_update32 - read and update a 32 bit stat from the chip
590  * @hw: ptr to the hardware info
591  * @reg: the hw reg to read
592  * @offset_loaded: has the initial offset been loaded yet
593  * @offset: ptr to current offset value
594  * @stat: ptr to the stat
595  **/
596 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
597 			       bool offset_loaded, u64 *offset, u64 *stat)
598 {
599 	u32 new_data;
600 
601 	new_data = rd32(hw, reg);
602 	if (!offset_loaded)
603 		*offset = new_data;
604 	if (likely(new_data >= *offset))
605 		*stat = (u32)(new_data - *offset);
606 	else
607 		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
608 }
609 
610 /**
611  * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
612  * @hw: ptr to the hardware info
613  * @reg: the hw reg to read and clear
614  * @stat: ptr to the stat
615  **/
616 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
617 {
618 	u32 new_data = rd32(hw, reg);
619 
620 	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
621 	*stat += new_data;
622 }
623 
624 /**
625  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
626  * @vsi: the VSI to be updated
627  **/
628 void i40e_update_eth_stats(struct i40e_vsi *vsi)
629 {
630 	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
631 	struct i40e_pf *pf = vsi->back;
632 	struct i40e_hw *hw = &pf->hw;
633 	struct i40e_eth_stats *oes;
634 	struct i40e_eth_stats *es;     /* device's eth stats */
635 
636 	es = &vsi->eth_stats;
637 	oes = &vsi->eth_stats_offsets;
638 
639 	/* Gather up the stats that the hw collects */
640 	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
641 			   vsi->stat_offsets_loaded,
642 			   &oes->tx_errors, &es->tx_errors);
643 	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
644 			   vsi->stat_offsets_loaded,
645 			   &oes->rx_discards, &es->rx_discards);
646 	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
647 			   vsi->stat_offsets_loaded,
648 			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
649 
650 	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
651 			   I40E_GLV_GORCL(stat_idx),
652 			   vsi->stat_offsets_loaded,
653 			   &oes->rx_bytes, &es->rx_bytes);
654 	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
655 			   I40E_GLV_UPRCL(stat_idx),
656 			   vsi->stat_offsets_loaded,
657 			   &oes->rx_unicast, &es->rx_unicast);
658 	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
659 			   I40E_GLV_MPRCL(stat_idx),
660 			   vsi->stat_offsets_loaded,
661 			   &oes->rx_multicast, &es->rx_multicast);
662 	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
663 			   I40E_GLV_BPRCL(stat_idx),
664 			   vsi->stat_offsets_loaded,
665 			   &oes->rx_broadcast, &es->rx_broadcast);
666 
667 	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
668 			   I40E_GLV_GOTCL(stat_idx),
669 			   vsi->stat_offsets_loaded,
670 			   &oes->tx_bytes, &es->tx_bytes);
671 	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
672 			   I40E_GLV_UPTCL(stat_idx),
673 			   vsi->stat_offsets_loaded,
674 			   &oes->tx_unicast, &es->tx_unicast);
675 	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
676 			   I40E_GLV_MPTCL(stat_idx),
677 			   vsi->stat_offsets_loaded,
678 			   &oes->tx_multicast, &es->tx_multicast);
679 	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
680 			   I40E_GLV_BPTCL(stat_idx),
681 			   vsi->stat_offsets_loaded,
682 			   &oes->tx_broadcast, &es->tx_broadcast);
683 	vsi->stat_offsets_loaded = true;
684 }
685 
686 /**
687  * i40e_update_veb_stats - Update Switch component statistics
688  * @veb: the VEB being updated
689  **/
690 void i40e_update_veb_stats(struct i40e_veb *veb)
691 {
692 	struct i40e_pf *pf = veb->pf;
693 	struct i40e_hw *hw = &pf->hw;
694 	struct i40e_eth_stats *oes;
695 	struct i40e_eth_stats *es;     /* device's eth stats */
696 	struct i40e_veb_tc_stats *veb_oes;
697 	struct i40e_veb_tc_stats *veb_es;
698 	int i, idx = 0;
699 
700 	idx = veb->stats_idx;
701 	es = &veb->stats;
702 	oes = &veb->stats_offsets;
703 	veb_es = &veb->tc_stats;
704 	veb_oes = &veb->tc_stats_offsets;
705 
706 	/* Gather up the stats that the hw collects */
707 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
708 			   veb->stat_offsets_loaded,
709 			   &oes->tx_discards, &es->tx_discards);
710 	if (hw->revision_id > 0)
711 		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
712 				   veb->stat_offsets_loaded,
713 				   &oes->rx_unknown_protocol,
714 				   &es->rx_unknown_protocol);
715 	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
716 			   veb->stat_offsets_loaded,
717 			   &oes->rx_bytes, &es->rx_bytes);
718 	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
719 			   veb->stat_offsets_loaded,
720 			   &oes->rx_unicast, &es->rx_unicast);
721 	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
722 			   veb->stat_offsets_loaded,
723 			   &oes->rx_multicast, &es->rx_multicast);
724 	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
725 			   veb->stat_offsets_loaded,
726 			   &oes->rx_broadcast, &es->rx_broadcast);
727 
728 	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
729 			   veb->stat_offsets_loaded,
730 			   &oes->tx_bytes, &es->tx_bytes);
731 	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
732 			   veb->stat_offsets_loaded,
733 			   &oes->tx_unicast, &es->tx_unicast);
734 	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
735 			   veb->stat_offsets_loaded,
736 			   &oes->tx_multicast, &es->tx_multicast);
737 	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
738 			   veb->stat_offsets_loaded,
739 			   &oes->tx_broadcast, &es->tx_broadcast);
740 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
741 		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
742 				   I40E_GLVEBTC_RPCL(i, idx),
743 				   veb->stat_offsets_loaded,
744 				   &veb_oes->tc_rx_packets[i],
745 				   &veb_es->tc_rx_packets[i]);
746 		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
747 				   I40E_GLVEBTC_RBCL(i, idx),
748 				   veb->stat_offsets_loaded,
749 				   &veb_oes->tc_rx_bytes[i],
750 				   &veb_es->tc_rx_bytes[i]);
751 		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
752 				   I40E_GLVEBTC_TPCL(i, idx),
753 				   veb->stat_offsets_loaded,
754 				   &veb_oes->tc_tx_packets[i],
755 				   &veb_es->tc_tx_packets[i]);
756 		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
757 				   I40E_GLVEBTC_TBCL(i, idx),
758 				   veb->stat_offsets_loaded,
759 				   &veb_oes->tc_tx_bytes[i],
760 				   &veb_es->tc_tx_bytes[i]);
761 	}
762 	veb->stat_offsets_loaded = true;
763 }
764 
765 /**
766  * i40e_update_vsi_stats - Update the vsi statistics counters.
767  * @vsi: the VSI to be updated
768  *
769  * There are a few instances where we store the same stat in a
770  * couple of different structs.  This is partly because we have
771  * the netdev stats that need to be filled out, which is slightly
772  * different from the "eth_stats" defined by the chip and used in
773  * VF communications.  We sort it out here.
774  **/
775 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
776 {
777 	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
778 	struct i40e_pf *pf = vsi->back;
779 	struct rtnl_link_stats64 *ons;
780 	struct rtnl_link_stats64 *ns;   /* netdev stats */
781 	struct i40e_eth_stats *oes;
782 	struct i40e_eth_stats *es;     /* device's eth stats */
783 	u64 tx_restart, tx_busy;
784 	struct i40e_ring *p;
785 	u64 bytes, packets;
786 	unsigned int start;
787 	u64 tx_linearize;
788 	u64 tx_force_wb;
789 	u64 tx_stopped;
790 	u64 rx_p, rx_b;
791 	u64 tx_p, tx_b;
792 	u16 q;
793 
794 	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
795 	    test_bit(__I40E_CONFIG_BUSY, pf->state))
796 		return;
797 
798 	ns = i40e_get_vsi_stats_struct(vsi);
799 	ons = &vsi->net_stats_offsets;
800 	es = &vsi->eth_stats;
801 	oes = &vsi->eth_stats_offsets;
802 
803 	/* Gather up the netdev and vsi stats that the driver collects
804 	 * on the fly during packet processing
805 	 */
806 	rx_b = rx_p = 0;
807 	tx_b = tx_p = 0;
808 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
809 	tx_stopped = 0;
810 	rx_page = 0;
811 	rx_buf = 0;
812 	rx_reuse = 0;
813 	rx_alloc = 0;
814 	rx_waive = 0;
815 	rx_busy = 0;
816 	rcu_read_lock();
817 	for (q = 0; q < vsi->num_queue_pairs; q++) {
818 		/* locate Tx ring */
819 		p = READ_ONCE(vsi->tx_rings[q]);
820 		if (!p)
821 			continue;
822 
823 		do {
824 			start = u64_stats_fetch_begin_irq(&p->syncp);
825 			packets = p->stats.packets;
826 			bytes = p->stats.bytes;
827 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
828 		tx_b += bytes;
829 		tx_p += packets;
830 		tx_restart += p->tx_stats.restart_queue;
831 		tx_busy += p->tx_stats.tx_busy;
832 		tx_linearize += p->tx_stats.tx_linearize;
833 		tx_force_wb += p->tx_stats.tx_force_wb;
834 		tx_stopped += p->tx_stats.tx_stopped;
835 
836 		/* locate Rx ring */
837 		p = READ_ONCE(vsi->rx_rings[q]);
838 		if (!p)
839 			continue;
840 
841 		do {
842 			start = u64_stats_fetch_begin_irq(&p->syncp);
843 			packets = p->stats.packets;
844 			bytes = p->stats.bytes;
845 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
846 		rx_b += bytes;
847 		rx_p += packets;
848 		rx_buf += p->rx_stats.alloc_buff_failed;
849 		rx_page += p->rx_stats.alloc_page_failed;
850 		rx_reuse += p->rx_stats.page_reuse_count;
851 		rx_alloc += p->rx_stats.page_alloc_count;
852 		rx_waive += p->rx_stats.page_waive_count;
853 		rx_busy += p->rx_stats.page_busy_count;
854 
855 		if (i40e_enabled_xdp_vsi(vsi)) {
856 			/* locate XDP ring */
857 			p = READ_ONCE(vsi->xdp_rings[q]);
858 			if (!p)
859 				continue;
860 
861 			do {
862 				start = u64_stats_fetch_begin_irq(&p->syncp);
863 				packets = p->stats.packets;
864 				bytes = p->stats.bytes;
865 			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
866 			tx_b += bytes;
867 			tx_p += packets;
868 			tx_restart += p->tx_stats.restart_queue;
869 			tx_busy += p->tx_stats.tx_busy;
870 			tx_linearize += p->tx_stats.tx_linearize;
871 			tx_force_wb += p->tx_stats.tx_force_wb;
872 		}
873 	}
874 	rcu_read_unlock();
875 	vsi->tx_restart = tx_restart;
876 	vsi->tx_busy = tx_busy;
877 	vsi->tx_linearize = tx_linearize;
878 	vsi->tx_force_wb = tx_force_wb;
879 	vsi->tx_stopped = tx_stopped;
880 	vsi->rx_page_failed = rx_page;
881 	vsi->rx_buf_failed = rx_buf;
882 	vsi->rx_page_reuse = rx_reuse;
883 	vsi->rx_page_alloc = rx_alloc;
884 	vsi->rx_page_waive = rx_waive;
885 	vsi->rx_page_busy = rx_busy;
886 
887 	ns->rx_packets = rx_p;
888 	ns->rx_bytes = rx_b;
889 	ns->tx_packets = tx_p;
890 	ns->tx_bytes = tx_b;
891 
892 	/* update netdev stats from eth stats */
893 	i40e_update_eth_stats(vsi);
894 	ons->tx_errors = oes->tx_errors;
895 	ns->tx_errors = es->tx_errors;
896 	ons->multicast = oes->rx_multicast;
897 	ns->multicast = es->rx_multicast;
898 	ons->rx_dropped = oes->rx_discards;
899 	ns->rx_dropped = es->rx_discards;
900 	ons->tx_dropped = oes->tx_discards;
901 	ns->tx_dropped = es->tx_discards;
902 
903 	/* pull in a couple PF stats if this is the main vsi */
904 	if (vsi == pf->vsi[pf->lan_vsi]) {
905 		ns->rx_crc_errors = pf->stats.crc_errors;
906 		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
907 		ns->rx_length_errors = pf->stats.rx_length_errors;
908 	}
909 }
910 
911 /**
912  * i40e_update_pf_stats - Update the PF statistics counters.
913  * @pf: the PF to be updated
914  **/
915 static void i40e_update_pf_stats(struct i40e_pf *pf)
916 {
917 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
918 	struct i40e_hw_port_stats *nsd = &pf->stats;
919 	struct i40e_hw *hw = &pf->hw;
920 	u32 val;
921 	int i;
922 
923 	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
924 			   I40E_GLPRT_GORCL(hw->port),
925 			   pf->stat_offsets_loaded,
926 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
927 	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
928 			   I40E_GLPRT_GOTCL(hw->port),
929 			   pf->stat_offsets_loaded,
930 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
931 	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
932 			   pf->stat_offsets_loaded,
933 			   &osd->eth.rx_discards,
934 			   &nsd->eth.rx_discards);
935 	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
936 			   I40E_GLPRT_UPRCL(hw->port),
937 			   pf->stat_offsets_loaded,
938 			   &osd->eth.rx_unicast,
939 			   &nsd->eth.rx_unicast);
940 	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
941 			   I40E_GLPRT_MPRCL(hw->port),
942 			   pf->stat_offsets_loaded,
943 			   &osd->eth.rx_multicast,
944 			   &nsd->eth.rx_multicast);
945 	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
946 			   I40E_GLPRT_BPRCL(hw->port),
947 			   pf->stat_offsets_loaded,
948 			   &osd->eth.rx_broadcast,
949 			   &nsd->eth.rx_broadcast);
950 	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
951 			   I40E_GLPRT_UPTCL(hw->port),
952 			   pf->stat_offsets_loaded,
953 			   &osd->eth.tx_unicast,
954 			   &nsd->eth.tx_unicast);
955 	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
956 			   I40E_GLPRT_MPTCL(hw->port),
957 			   pf->stat_offsets_loaded,
958 			   &osd->eth.tx_multicast,
959 			   &nsd->eth.tx_multicast);
960 	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
961 			   I40E_GLPRT_BPTCL(hw->port),
962 			   pf->stat_offsets_loaded,
963 			   &osd->eth.tx_broadcast,
964 			   &nsd->eth.tx_broadcast);
965 
966 	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
967 			   pf->stat_offsets_loaded,
968 			   &osd->tx_dropped_link_down,
969 			   &nsd->tx_dropped_link_down);
970 
971 	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
972 			   pf->stat_offsets_loaded,
973 			   &osd->crc_errors, &nsd->crc_errors);
974 
975 	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
976 			   pf->stat_offsets_loaded,
977 			   &osd->illegal_bytes, &nsd->illegal_bytes);
978 
979 	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
980 			   pf->stat_offsets_loaded,
981 			   &osd->mac_local_faults,
982 			   &nsd->mac_local_faults);
983 	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
984 			   pf->stat_offsets_loaded,
985 			   &osd->mac_remote_faults,
986 			   &nsd->mac_remote_faults);
987 
988 	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
989 			   pf->stat_offsets_loaded,
990 			   &osd->rx_length_errors,
991 			   &nsd->rx_length_errors);
992 
993 	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
994 			   pf->stat_offsets_loaded,
995 			   &osd->link_xon_rx, &nsd->link_xon_rx);
996 	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
997 			   pf->stat_offsets_loaded,
998 			   &osd->link_xon_tx, &nsd->link_xon_tx);
999 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1000 			   pf->stat_offsets_loaded,
1001 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
1002 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1003 			   pf->stat_offsets_loaded,
1004 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
1005 
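	/* per-priority XON/XOFF pause frame counters, one set per priority 0-7 */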
1006 	for (i = 0; i < 8; i++) {
1007 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1008 				   pf->stat_offsets_loaded,
1009 				   &osd->priority_xoff_rx[i],
1010 				   &nsd->priority_xoff_rx[i]);
1011 		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1012 				   pf->stat_offsets_loaded,
1013 				   &osd->priority_xon_rx[i],
1014 				   &nsd->priority_xon_rx[i]);
1015 		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1016 				   pf->stat_offsets_loaded,
1017 				   &osd->priority_xon_tx[i],
1018 				   &nsd->priority_xon_tx[i]);
1019 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1020 				   pf->stat_offsets_loaded,
1021 				   &osd->priority_xoff_tx[i],
1022 				   &nsd->priority_xoff_tx[i]);
1023 		i40e_stat_update32(hw,
1024 				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1025 				   pf->stat_offsets_loaded,
1026 				   &osd->priority_xon_2_xoff[i],
1027 				   &nsd->priority_xon_2_xoff[i]);
1028 	}
1029 
1030 	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1031 			   I40E_GLPRT_PRC64L(hw->port),
1032 			   pf->stat_offsets_loaded,
1033 			   &osd->rx_size_64, &nsd->rx_size_64);
1034 	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1035 			   I40E_GLPRT_PRC127L(hw->port),
1036 			   pf->stat_offsets_loaded,
1037 			   &osd->rx_size_127, &nsd->rx_size_127);
1038 	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1039 			   I40E_GLPRT_PRC255L(hw->port),
1040 			   pf->stat_offsets_loaded,
1041 			   &osd->rx_size_255, &nsd->rx_size_255);
1042 	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1043 			   I40E_GLPRT_PRC511L(hw->port),
1044 			   pf->stat_offsets_loaded,
1045 			   &osd->rx_size_511, &nsd->rx_size_511);
1046 	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1047 			   I40E_GLPRT_PRC1023L(hw->port),
1048 			   pf->stat_offsets_loaded,
1049 			   &osd->rx_size_1023, &nsd->rx_size_1023);
1050 	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1051 			   I40E_GLPRT_PRC1522L(hw->port),
1052 			   pf->stat_offsets_loaded,
1053 			   &osd->rx_size_1522, &nsd->rx_size_1522);
1054 	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1055 			   I40E_GLPRT_PRC9522L(hw->port),
1056 			   pf->stat_offsets_loaded,
1057 			   &osd->rx_size_big, &nsd->rx_size_big);
1058 
1059 	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1060 			   I40E_GLPRT_PTC64L(hw->port),
1061 			   pf->stat_offsets_loaded,
1062 			   &osd->tx_size_64, &nsd->tx_size_64);
1063 	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1064 			   I40E_GLPRT_PTC127L(hw->port),
1065 			   pf->stat_offsets_loaded,
1066 			   &osd->tx_size_127, &nsd->tx_size_127);
1067 	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1068 			   I40E_GLPRT_PTC255L(hw->port),
1069 			   pf->stat_offsets_loaded,
1070 			   &osd->tx_size_255, &nsd->tx_size_255);
1071 	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1072 			   I40E_GLPRT_PTC511L(hw->port),
1073 			   pf->stat_offsets_loaded,
1074 			   &osd->tx_size_511, &nsd->tx_size_511);
1075 	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1076 			   I40E_GLPRT_PTC1023L(hw->port),
1077 			   pf->stat_offsets_loaded,
1078 			   &osd->tx_size_1023, &nsd->tx_size_1023);
1079 	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1080 			   I40E_GLPRT_PTC1522L(hw->port),
1081 			   pf->stat_offsets_loaded,
1082 			   &osd->tx_size_1522, &nsd->tx_size_1522);
1083 	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1084 			   I40E_GLPRT_PTC9522L(hw->port),
1085 			   pf->stat_offsets_loaded,
1086 			   &osd->tx_size_big, &nsd->tx_size_big);
1087 
1088 	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1089 			   pf->stat_offsets_loaded,
1090 			   &osd->rx_undersize, &nsd->rx_undersize);
1091 	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1092 			   pf->stat_offsets_loaded,
1093 			   &osd->rx_fragments, &nsd->rx_fragments);
1094 	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1095 			   pf->stat_offsets_loaded,
1096 			   &osd->rx_oversize, &nsd->rx_oversize);
1097 	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1098 			   pf->stat_offsets_loaded,
1099 			   &osd->rx_jabber, &nsd->rx_jabber);
1100 
1101 	/* FDIR stats */
1102 	i40e_stat_update_and_clear32(hw,
1103 			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1104 			&nsd->fd_atr_match);
1105 	i40e_stat_update_and_clear32(hw,
1106 			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1107 			&nsd->fd_sb_match);
1108 	i40e_stat_update_and_clear32(hw,
1109 			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1110 			&nsd->fd_atr_tunnel_match);
1111 
1112 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
1113 	nsd->tx_lpi_status =
1114 		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1115 			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1116 	nsd->rx_lpi_status =
1117 		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1118 			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1119 	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1120 			   pf->stat_offsets_loaded,
1121 			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
1122 	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1123 			   pf->stat_offsets_loaded,
1124 			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
1125 
1126 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1127 	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1128 		nsd->fd_sb_status = true;
1129 	else
1130 		nsd->fd_sb_status = false;
1131 
1132 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1133 	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1134 		nsd->fd_atr_status = true;
1135 	else
1136 		nsd->fd_atr_status = false;
1137 
1138 	pf->stat_offsets_loaded = true;
1139 }
1140 
1141 /**
1142  * i40e_update_stats - Update the various statistics counters.
1143  * @vsi: the VSI to be updated
1144  *
1145  * Update the various stats for this VSI and its related entities.
1146  **/
1147 void i40e_update_stats(struct i40e_vsi *vsi)
1148 {
1149 	struct i40e_pf *pf = vsi->back;
1150 
1151 	if (vsi == pf->vsi[pf->lan_vsi])
1152 		i40e_update_pf_stats(pf);
1153 
1154 	i40e_update_vsi_stats(vsi);
1155 }
1156 
1157 /**
1158  * i40e_count_filters - counts VSI mac filters
1159  * @vsi: the VSI to be searched
1160  *
1161  * Returns count of mac filters
1162  **/
1163 int i40e_count_filters(struct i40e_vsi *vsi)
1164 {
1165 	struct i40e_mac_filter *f;
1166 	struct hlist_node *h;
1167 	int bkt;
1168 	int cnt = 0;
1169 
1170 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
1171 		++cnt;
1172 
1173 	return cnt;
1174 }
1175 
1176 /**
1177  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1178  * @vsi: the VSI to be searched
1179  * @macaddr: the MAC address
1180  * @vlan: the vlan
1181  *
1182  * Returns ptr to the filter object or NULL
1183  **/
1184 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1185 						const u8 *macaddr, s16 vlan)
1186 {
1187 	struct i40e_mac_filter *f;
1188 	u64 key;
1189 
1190 	if (!vsi || !macaddr)
1191 		return NULL;
1192 
1193 	key = i40e_addr_to_hkey(macaddr);
1194 	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1195 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1196 		    (vlan == f->vlan))
1197 			return f;
1198 	}
1199 	return NULL;
1200 }
1201 
1202 /**
1203  * i40e_find_mac - Find a mac addr in the macvlan filters list
1204  * @vsi: the VSI to be searched
1205  * @macaddr: the MAC address we are searching for
1206  *
1207  * Returns the first filter with the provided MAC address or NULL if
1208  * MAC address was not found
1209  **/
1210 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1211 {
1212 	struct i40e_mac_filter *f;
1213 	u64 key;
1214 
1215 	if (!vsi || !macaddr)
1216 		return NULL;
1217 
1218 	key = i40e_addr_to_hkey(macaddr);
1219 	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1220 		if ((ether_addr_equal(macaddr, f->macaddr)))
1221 			return f;
1222 	}
1223 	return NULL;
1224 }
1225 
1226 /**
1227  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1228  * @vsi: the VSI to be searched
1229  *
1230  * Returns true if VSI is in vlan mode or false otherwise
1231  **/
1232 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1233 {
1234 	/* If we have a PVID, always operate in VLAN mode */
1235 	if (vsi->info.pvid)
1236 		return true;
1237 
1238 	/* We need to operate in VLAN mode whenever we have any filters with
1239 	 * a VLAN other than I40E_VLAN_ANY. We could check the table each
1240 	 * time, incurring search cost repeatedly. However, we can notice two
1241 	 * things:
1242 	 *
1243 	 * 1) the only place where we can gain a VLAN filter is in
1244 	 *    i40e_add_filter.
1245 	 *
1246 	 * 2) the only place where filters are actually removed is in
1247 	 *    i40e_sync_filters_subtask.
1248 	 *
1249 	 * Thus, we can simply use a boolean value, has_vlan_filters which we
1250 	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1251 	 * we have to perform the full search after deleting filters in
1252 	 * i40e_sync_filters_subtask, but we already have to search
1253 	 * filters here and can perform the check at the same time. This
1254 	 * results in avoiding embedding a loop for VLAN mode inside another
1255 	 * loop over all the filters, and should maintain correctness as noted
1256 	 * above.
1257 	 */
1258 	return vsi->has_vlan_filter;
1259 }
1260 
1261 /**
1262  * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1263  * @vsi: the VSI to configure
1264  * @tmp_add_list: list of filters ready to be added
1265  * @tmp_del_list: list of filters ready to be deleted
1266  * @vlan_filters: the number of active VLAN filters
1267  *
1268  * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1269  * behave as expected. If we have any active VLAN filters remaining or about
1270  * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1271  * so that they only match against untagged traffic. If we no longer have any
1272  * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1273  * so that they match against both tagged and untagged traffic. In this way,
1274  * we ensure that we correctly receive the desired traffic. This ensures that
1275  * when we have an active VLAN we will receive only untagged traffic and
1276  * traffic matching active VLANs. If we have no active VLANs then we will
1277  * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1278  *
1279  * Finally, in a similar fashion, this function also corrects filters when
1280  * there is an active PVID assigned to this VSI.
1281  *
1282  * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1283  *
1284  * This function is only expected to be called from within
1285  * i40e_sync_vsi_filters.
1286  *
1287  * NOTE: This function expects to be called while under the
1288  * mac_filter_hash_lock
1289  */
1290 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1291 					 struct hlist_head *tmp_add_list,
1292 					 struct hlist_head *tmp_del_list,
1293 					 int vlan_filters)
1294 {
1295 	s16 pvid = le16_to_cpu(vsi->info.pvid);
1296 	struct i40e_mac_filter *f, *add_head;
1297 	struct i40e_new_mac_filter *new;
1298 	struct hlist_node *h;
1299 	int bkt, new_vlan;
1300 
1301 	/* To determine if a particular filter needs to be replaced we
1302 	 * have the three following conditions:
1303 	 *
1304 	 * a) if we have a PVID assigned, then all filters which are
1305 	 *    not marked as VLAN=PVID must be replaced with filters that
1306 	 *    are.
1307 	 * b) otherwise, if we have any active VLANS, all filters
1308 	 *    which are marked as VLAN=-1 must be replaced with
1309 	 *    filters marked as VLAN=0
1310 	 * c) finally, if we do not have any active VLANS, all filters
1311 	 *    which are marked as VLAN=0 must be replaced with filters
1312 	 *    marked as VLAN=-1
1313 	 */
1314 
1315 	/* Update the filters about to be added in place */
1316 	hlist_for_each_entry(new, tmp_add_list, hlist) {
1317 		if (pvid && new->f->vlan != pvid)
1318 			new->f->vlan = pvid;
1319 		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1320 			new->f->vlan = 0;
1321 		else if (!vlan_filters && new->f->vlan == 0)
1322 			new->f->vlan = I40E_VLAN_ANY;
1323 	}
1324 
1325 	/* Update the remaining active filters */
1326 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1327 		/* Combine the checks for whether a filter needs to be changed
1328 		 * and then determine the new VLAN inside the if block, in
1329 		 * order to avoid duplicating code for adding the new filter
1330 		 * then deleting the old filter.
1331 		 */
1332 		if ((pvid && f->vlan != pvid) ||
1333 		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1334 		    (!vlan_filters && f->vlan == 0)) {
1335 			/* Determine the new vlan we will be adding */
1336 			if (pvid)
1337 				new_vlan = pvid;
1338 			else if (vlan_filters)
1339 				new_vlan = 0;
1340 			else
1341 				new_vlan = I40E_VLAN_ANY;
1342 
1343 			/* Create the new filter */
1344 			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1345 			if (!add_head)
1346 				return -ENOMEM;
1347 
1348 			/* Create a temporary i40e_new_mac_filter */
1349 			new = kzalloc(sizeof(*new), GFP_ATOMIC);
1350 			if (!new)
1351 				return -ENOMEM;
1352 
1353 			new->f = add_head;
1354 			new->state = add_head->state;
1355 
1356 			/* Add the new filter to the tmp list */
1357 			hlist_add_head(&new->hlist, tmp_add_list);
1358 
1359 			/* Put the original filter into the delete list */
1360 			f->state = I40E_FILTER_REMOVE;
1361 			hash_del(&f->hlist);
1362 			hlist_add_head(&f->hlist, tmp_del_list);
1363 		}
1364 	}
1365 
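	/* Note whether any VLAN-tagged filters remain so i40e_is_vsi_in_vlan()
	 * can answer without rescanning the table.
	 */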
1366 	vsi->has_vlan_filter = !!vlan_filters;
1367 
1368 	return 0;
1369 }
1370 
1371 /**
1372  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1373  * @vsi: the PF Main VSI - inappropriate for any other VSI
1374  * @macaddr: the MAC address
1375  *
1376  * Remove whatever filter the firmware set up so the driver can manage
1377  * its own filtering intelligently.
1378  **/
1379 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1380 {
1381 	struct i40e_aqc_remove_macvlan_element_data element;
1382 	struct i40e_pf *pf = vsi->back;
1383 
1384 	/* Only appropriate for the PF main VSI */
1385 	if (vsi->type != I40E_VSI_MAIN)
1386 		return;
1387 
1388 	memset(&element, 0, sizeof(element));
1389 	ether_addr_copy(element.mac_addr, macaddr);
1390 	element.vlan_tag = 0;
1391 	/* Ignore error returns, some firmware does it this way... */
1392 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1393 	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1394 
1395 	memset(&element, 0, sizeof(element));
1396 	ether_addr_copy(element.mac_addr, macaddr);
1397 	element.vlan_tag = 0;
1398 	/* ...and some firmware does it this way. */
1399 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1400 			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1401 	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1402 }
1403 
1404 /**
1405  * i40e_add_filter - Add a mac/vlan filter to the VSI
1406  * @vsi: the VSI to be searched
1407  * @macaddr: the MAC address
1408  * @vlan: the vlan
1409  *
1410  * Returns ptr to the filter object or NULL when no memory available.
1411  *
1412  * NOTE: This function is expected to be called with mac_filter_hash_lock
1413  * being held.
1414  **/
1415 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1416 					const u8 *macaddr, s16 vlan)
1417 {
1418 	struct i40e_mac_filter *f;
1419 	u64 key;
1420 
1421 	if (!vsi || !macaddr)
1422 		return NULL;
1423 
1424 	f = i40e_find_filter(vsi, macaddr, vlan);
1425 	if (!f) {
1426 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
1427 		if (!f)
1428 			return NULL;
1429 
1430 		/* Update the boolean indicating if we need to function in
1431 		 * VLAN mode.
1432 		 */
1433 		if (vlan >= 0)
1434 			vsi->has_vlan_filter = true;
1435 
1436 		ether_addr_copy(f->macaddr, macaddr);
1437 		f->vlan = vlan;
1438 		f->state = I40E_FILTER_NEW;
1439 		INIT_HLIST_NODE(&f->hlist);
1440 
1441 		key = i40e_addr_to_hkey(macaddr);
1442 		hash_add(vsi->mac_filter_hash, &f->hlist, key);
1443 
1444 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1445 		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1446 	}
1447 
1448 	/* If we're asked to add a filter that has been marked for removal, it
1449 	 * is safe to simply restore it to active state. __i40e_del_filter
1450 	 * will have simply deleted any filters which were previously marked
1451 	 * NEW or FAILED, so if it is currently marked REMOVE it must have
1452 	 * previously been ACTIVE. Since we haven't yet run the sync filters
1453 	 * task, just restore this filter to the ACTIVE state so that the
1454 	 * sync task leaves it in place
1455 	 */
1456 	if (f->state == I40E_FILTER_REMOVE)
1457 		f->state = I40E_FILTER_ACTIVE;
1458 
1459 	return f;
1460 }
1461 
1462 /**
1463  * __i40e_del_filter - Remove a specific filter from the VSI
1464  * @vsi: VSI to remove from
1465  * @f: the filter to remove from the list
1466  *
1467  * This function should be called instead of i40e_del_filter only if you know
1468  * the exact filter you will remove already, such as via i40e_find_filter or
1469  * i40e_find_mac.
1470  *
1471  * NOTE: This function is expected to be called with mac_filter_hash_lock
1472  * being held.
1473  * ANOTHER NOTE: This function MUST be called from within the context of
1474  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1475  * instead of list_for_each_entry().
1476  **/
1477 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1478 {
1479 	if (!f)
1480 		return;
1481 
1482 	/* If the filter was never added to firmware then we can just delete it
1483 	 * directly and we don't want to set the status to remove or else an
1484 	 * admin queue command will unnecessarily fire.
1485 	 */
1486 	if ((f->state == I40E_FILTER_FAILED) ||
1487 	    (f->state == I40E_FILTER_NEW)) {
1488 		hash_del(&f->hlist);
1489 		kfree(f);
1490 	} else {
1491 		f->state = I40E_FILTER_REMOVE;
1492 	}
1493 
1494 	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1495 	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1496 }
1497 
1498 /**
1499  * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1500  * @vsi: the VSI to be searched
1501  * @macaddr: the MAC address
1502  * @vlan: the VLAN
1503  *
1504  * NOTE: This function is expected to be called with mac_filter_hash_lock
1505  * being held.
1506  * ANOTHER NOTE: This function MUST be called from within the context of
1507  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1508  * instead of list_for_each_entry().
1509  **/
1510 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1511 {
1512 	struct i40e_mac_filter *f;
1513 
1514 	if (!vsi || !macaddr)
1515 		return;
1516 
1517 	f = i40e_find_filter(vsi, macaddr, vlan);
1518 	__i40e_del_filter(vsi, f);
1519 }
1520 
1521 /**
1522  * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1523  * @vsi: the VSI to be searched
1524  * @macaddr: the mac address to be filtered
1525  *
1526  * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1527  * go through all the macvlan filters and add a macvlan filter for each
1528  * unique vlan that already exists. If a PVID has been assigned, instead only
1529  * add the macaddr to that VLAN.
1530  *
1531  * Returns last filter added on success, else NULL
1532  **/
1533 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1534 					    const u8 *macaddr)
1535 {
1536 	struct i40e_mac_filter *f, *add = NULL;
1537 	struct hlist_node *h;
1538 	int bkt;
1539 
1540 	if (vsi->info.pvid)
1541 		return i40e_add_filter(vsi, macaddr,
1542 				       le16_to_cpu(vsi->info.pvid));
1543 
1544 	if (!i40e_is_vsi_in_vlan(vsi))
1545 		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1546 
1547 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1548 		if (f->state == I40E_FILTER_REMOVE)
1549 			continue;
1550 		add = i40e_add_filter(vsi, macaddr, f->vlan);
1551 		if (!add)
1552 			return NULL;
1553 	}
1554 
1555 	return add;
1556 }
1557 
1558 /**
1559  * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1560  * @vsi: the VSI to be searched
1561  * @macaddr: the mac address to be removed
1562  *
1563  * Removes a given MAC address from a VSI regardless of what VLAN it has been
1564  * associated with.
1565  *
1566  * Returns 0 for success, or error
1567  **/
1568 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1569 {
1570 	struct i40e_mac_filter *f;
1571 	struct hlist_node *h;
1572 	bool found = false;
1573 	int bkt;
1574 
1575 	lockdep_assert_held(&vsi->mac_filter_hash_lock);
1576 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1577 		if (ether_addr_equal(macaddr, f->macaddr)) {
1578 			__i40e_del_filter(vsi, f);
1579 			found = true;
1580 		}
1581 	}
1582 
1583 	if (found)
1584 		return 0;
1585 	else
1586 		return -ENOENT;
1587 }
1588 
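/* Callers typically take mac_filter_hash_lock, add or remove addresses with
 * the helpers above, drop the lock, and then let the service task push the
 * changes to hardware, as i40e_set_mac() below does.
 */
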
1589 /**
1590  * i40e_set_mac - NDO callback to set mac address
1591  * @netdev: network interface device structure
1592  * @p: pointer to an address structure
1593  *
1594  * Returns 0 on success, negative on failure
1595  **/
1596 static int i40e_set_mac(struct net_device *netdev, void *p)
1597 {
1598 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1599 	struct i40e_vsi *vsi = np->vsi;
1600 	struct i40e_pf *pf = vsi->back;
1601 	struct i40e_hw *hw = &pf->hw;
1602 	struct sockaddr *addr = p;
1603 
1604 	if (!is_valid_ether_addr(addr->sa_data))
1605 		return -EADDRNOTAVAIL;
1606 
1607 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1608 		netdev_info(netdev, "already using mac address %pM\n",
1609 			    addr->sa_data);
1610 		return 0;
1611 	}
1612 
1613 	if (test_bit(__I40E_DOWN, pf->state) ||
1614 	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1615 		return -EADDRNOTAVAIL;
1616 
1617 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1618 		netdev_info(netdev, "returning to hw mac address %pM\n",
1619 			    hw->mac.addr);
1620 	else
1621 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1622 
1623 	/* Copy the address first, so that we avoid a possible race with
1624 	 * .set_rx_mode().
1625 	 * - Remove old address from MAC filter
1626 	 * - Copy new address
1627 	 * - Add new address to MAC filter
1628 	 */
1629 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1630 	i40e_del_mac_filter(vsi, netdev->dev_addr);
1631 	eth_hw_addr_set(netdev, addr->sa_data);
1632 	i40e_add_mac_filter(vsi, netdev->dev_addr);
1633 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1634 
1635 	if (vsi->type == I40E_VSI_MAIN) {
1636 		i40e_status ret;
1637 
1638 		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1639 						addr->sa_data, NULL);
1640 		if (ret)
1641 			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1642 				    i40e_stat_str(hw, ret),
1643 				    i40e_aq_str(hw, hw->aq.asq_last_status));
1644 	}
1645 
1646 	/* schedule our worker thread which will take care of
1647 	 * applying the new filter changes
1648 	 */
1649 	i40e_service_event_schedule(pf);
1650 	return 0;
1651 }
1652 
1653 /**
1654  * i40e_config_rss_aq - Prepare for RSS using AQ commands
1655  * @vsi: vsi structure
1656  * @seed: RSS hash seed
1657  * @lut: pointer to lookup table of lut_size
1658  * @lut_size: size of the lookup table
1659  **/
1660 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1661 			      u8 *lut, u16 lut_size)
1662 {
1663 	struct i40e_pf *pf = vsi->back;
1664 	struct i40e_hw *hw = &pf->hw;
1665 	int ret = 0;
1666 
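	/* the AdminQ takes the hash key as a get/set_rss_key_data struct, so
	 * the raw seed bytes are reinterpreted below before being handed to
	 * the AQ
	 */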
1667 	if (seed) {
1668 		struct i40e_aqc_get_set_rss_key_data *seed_dw =
1669 			(struct i40e_aqc_get_set_rss_key_data *)seed;
1670 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1671 		if (ret) {
1672 			dev_info(&pf->pdev->dev,
1673 				 "Cannot set RSS key, err %s aq_err %s\n",
1674 				 i40e_stat_str(hw, ret),
1675 				 i40e_aq_str(hw, hw->aq.asq_last_status));
1676 			return ret;
1677 		}
1678 	}
1679 	if (lut) {
1680 		bool pf_lut = vsi->type == I40E_VSI_MAIN;
1681 
1682 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1683 		if (ret) {
1684 			dev_info(&pf->pdev->dev,
1685 				 "Cannot set RSS lut, err %s aq_err %s\n",
1686 				 i40e_stat_str(hw, ret),
1687 				 i40e_aq_str(hw, hw->aq.asq_last_status));
1688 			return ret;
1689 		}
1690 	}
1691 	return ret;
1692 }
1693 
1694 /**
1695  * i40e_vsi_config_rss - Prepare for VSI (VMDq) RSS if used
1696  * @vsi: VSI structure
1697  **/
1698 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1699 {
1700 	struct i40e_pf *pf = vsi->back;
1701 	u8 seed[I40E_HKEY_ARRAY_SIZE];
1702 	u8 *lut;
1703 	int ret;
1704 
1705 	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1706 		return 0;
1707 	if (!vsi->rss_size)
1708 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
1709 				      vsi->num_queue_pairs);
1710 	if (!vsi->rss_size)
1711 		return -EINVAL;
1712 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1713 	if (!lut)
1714 		return -ENOMEM;
1715 
1716 	/* Use the user configured hash keys and lookup table if there is one,
1717 	 * otherwise use default
1718 	 */
1719 	if (vsi->rss_lut_user)
1720 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1721 	else
1722 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1723 	if (vsi->rss_hkey_user)
1724 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1725 	else
1726 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1727 	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1728 	kfree(lut);
1729 	return ret;
1730 }
1731 
1732 /**
1733  * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1734  * @vsi: the VSI being configured
1735  * @ctxt: VSI context structure
1736  * @enabled_tc: bitmap of enabled traffic classes
1737  *
1738  * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1739  **/
1740 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1741 					   struct i40e_vsi_context *ctxt,
1742 					   u8 enabled_tc)
1743 {
1744 	u16 qcount = 0, max_qcount, qmap, sections = 0;
1745 	int i, override_q, pow, num_qps, ret;
1746 	u8 netdev_tc = 0, offset = 0;
1747 
1748 	if (vsi->type != I40E_VSI_MAIN)
1749 		return -EINVAL;
1750 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1751 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1752 	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1753 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1754 	num_qps = vsi->mqprio_qopt.qopt.count[0];
1755 
1756 	/* find the next higher power-of-2 of num queue pairs */
1757 	pow = ilog2(num_qps);
1758 	if (!is_power_of_2(num_qps))
1759 		pow++;
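	/* qmap packs the TC0 queue offset and the log2 of the queue count
	 * into a single field
	 */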
1760 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1761 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1762 
1763 	/* Setup queue offset/count for all TCs for given VSI */
1764 	max_qcount = vsi->mqprio_qopt.qopt.count[0];
1765 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1766 		/* See if the given TC is enabled for the given VSI */
1767 		if (vsi->tc_config.enabled_tc & BIT(i)) {
1768 			offset = vsi->mqprio_qopt.qopt.offset[i];
1769 			qcount = vsi->mqprio_qopt.qopt.count[i];
1770 			if (qcount > max_qcount)
1771 				max_qcount = qcount;
1772 			vsi->tc_config.tc_info[i].qoffset = offset;
1773 			vsi->tc_config.tc_info[i].qcount = qcount;
1774 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1775 		} else {
1776 			/* TC is not enabled so set the offset to
1777 			 * default queue and allocate one queue
1778 			 * for the given TC.
1779 			 */
1780 			vsi->tc_config.tc_info[i].qoffset = 0;
1781 			vsi->tc_config.tc_info[i].qcount = 1;
1782 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1783 		}
1784 	}
1785 
1786 	/* Set actual Tx/Rx queue pairs */
1787 	vsi->num_queue_pairs = offset + qcount;
1788 
1789 	/* Setup queue TC[0].qmap for given VSI context */
1790 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1791 	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1792 	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1793 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1794 
1795 	/* Reconfigure RSS for main VSI with max queue count */
1796 	vsi->rss_size = max_qcount;
1797 	ret = i40e_vsi_config_rss(vsi);
1798 	if (ret) {
1799 		dev_info(&vsi->back->pdev->dev,
1800 			 "Failed to reconfig rss for num_queues (%u)\n",
1801 			 max_qcount);
1802 		return ret;
1803 	}
1804 	vsi->reconfig_rss = true;
1805 	dev_dbg(&vsi->back->pdev->dev,
1806 		"Reconfigured rss with num_queues (%u)\n", max_qcount);
1807 
1808 	/* Find queue count available for channel VSIs and starting offset
1809 	 * for channel VSIs
1810 	 */
1811 	override_q = vsi->mqprio_qopt.qopt.count[0];
1812 	if (override_q && override_q < vsi->num_queue_pairs) {
1813 		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1814 		vsi->next_base_queue = override_q;
1815 	}
1816 	return 0;
1817 }
1818 
1819 /**
1820  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1821  * @vsi: the VSI being setup
1822  * @ctxt: VSI context structure
1823  * @enabled_tc: Enabled TCs bitmap
1824  * @is_add: True if called before Add VSI
1825  *
1826  * Setup VSI queue mapping for enabled traffic classes.
1827  **/
1828 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1829 				     struct i40e_vsi_context *ctxt,
1830 				     u8 enabled_tc,
1831 				     bool is_add)
1832 {
1833 	struct i40e_pf *pf = vsi->back;
1834 	u16 num_tc_qps = 0;
1835 	u16 sections = 0;
1836 	u8 netdev_tc = 0;
1837 	u16 numtc = 1;
1838 	u16 qcount;
1839 	u8 offset;
1840 	u16 qmap;
1841 	int i;
1842 
1843 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1844 	offset = 0;
1845 	/* zero out the queue mapping; it will be updated at the end of the function */
1846 	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
1847 
1848 	if (vsi->type == I40E_VSI_MAIN) {
1849 		/* This code helps add more queues to the VSI if we have
1850 		 * more cores than RSS can support; the higher-numbered queues
1851 		 * will be served by ATR or other filters. Furthermore, a
1852 		 * non-zero req_queue_pairs means the user requested a new
1853 		 * queue count via ethtool's set_channels, so use this
1854 		 * value for queue distribution across traffic classes.
1855 		 */
1856 		if (vsi->req_queue_pairs > 0)
1857 			vsi->num_queue_pairs = vsi->req_queue_pairs;
1858 		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1859 			vsi->num_queue_pairs = pf->num_lan_msix;
1860 	}
1861 
1862 	/* Number of queues per enabled TC */
1863 	if (vsi->type == I40E_VSI_MAIN ||
1864 	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
1865 		num_tc_qps = vsi->num_queue_pairs;
1866 	else
1867 		num_tc_qps = vsi->alloc_queue_pairs;
1868 
1869 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1870 		/* Find numtc from enabled TC bitmap */
1871 		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1872 			if (enabled_tc & BIT(i)) /* TC is enabled */
1873 				numtc++;
1874 		}
1875 		if (!numtc) {
1876 			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1877 			numtc = 1;
1878 		}
1879 		num_tc_qps = num_tc_qps / numtc;
1880 		num_tc_qps = min_t(int, num_tc_qps,
1881 				   i40e_pf_get_max_q_per_tc(pf));
1882 	}
1883 
1884 	vsi->tc_config.numtc = numtc;
1885 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1886 
1887 	/* Do not allow more TC queue pairs than there are MSI-X vectors */
1888 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1889 		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1890 
1891 	/* Setup queue offset/count for all TCs for given VSI */
1892 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1893 		/* See if the given TC is enabled for the given VSI */
1894 		if (vsi->tc_config.enabled_tc & BIT(i)) {
1895 			/* TC is enabled */
1896 			int pow, num_qps;
1897 
1898 			switch (vsi->type) {
1899 			case I40E_VSI_MAIN:
1900 				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1901 				    I40E_FLAG_FD_ATR_ENABLED)) ||
1902 				    vsi->tc_config.enabled_tc != 1) {
1903 					qcount = min_t(int, pf->alloc_rss_size,
1904 						       num_tc_qps);
1905 					break;
1906 				}
1907 				fallthrough;
1908 			case I40E_VSI_FDIR:
1909 			case I40E_VSI_SRIOV:
1910 			case I40E_VSI_VMDQ2:
1911 			default:
1912 				qcount = num_tc_qps;
1913 				WARN_ON(i != 0);
1914 				break;
1915 			}
1916 			vsi->tc_config.tc_info[i].qoffset = offset;
1917 			vsi->tc_config.tc_info[i].qcount = qcount;
1918 
1919 			/* find the next higher power-of-2 of num queue pairs */
1920 			num_qps = qcount;
1921 			pow = 0;
1922 			while (num_qps && (BIT_ULL(pow) < qcount)) {
1923 				pow++;
1924 				num_qps >>= 1;
1925 			}
1926 
1927 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1928 			qmap =
1929 			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1930 			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1931 
1932 			offset += qcount;
1933 		} else {
1934 			/* TC is not enabled so set the offset to
1935 			 * default queue and allocate one queue
1936 			 * for the given TC.
1937 			 */
1938 			vsi->tc_config.tc_info[i].qoffset = 0;
1939 			vsi->tc_config.tc_info[i].qcount = 1;
1940 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1941 
1942 			qmap = 0;
1943 		}
1944 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1945 	}
1946 	/* Do not change previously set num_queue_pairs for PFs and VFs */
1947 	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
1948 	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
1949 	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
1950 		vsi->num_queue_pairs = offset;
1951 
1952 	/* Scheduler section valid can only be set for ADD VSI */
1953 	if (is_add) {
1954 		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1955 
1956 		ctxt->info.up_enable_bits = enabled_tc;
1957 	}
1958 	if (vsi->type == I40E_VSI_SRIOV) {
1959 		ctxt->info.mapping_flags |=
1960 				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1961 		for (i = 0; i < vsi->num_queue_pairs; i++)
1962 			ctxt->info.queue_mapping[i] =
1963 					       cpu_to_le16(vsi->base_queue + i);
1964 	} else {
1965 		ctxt->info.mapping_flags |=
1966 					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1967 		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1968 	}
1969 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1970 }
1971 
1972 /**
1973  * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1974  * @netdev: the netdevice
1975  * @addr: address to add
1976  *
1977  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1978  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1979  */
1980 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1981 {
1982 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1983 	struct i40e_vsi *vsi = np->vsi;
1984 
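	/* a NULL return from i40e_add_mac_filter() means the filter could not
	 * be allocated, so report it as an out-of-memory error
	 */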
1985 	if (i40e_add_mac_filter(vsi, addr))
1986 		return 0;
1987 	else
1988 		return -ENOMEM;
1989 }
1990 
1991 /**
1992  * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1993  * @netdev: the netdevice
1994  * @addr: address to remove
1995  *
1996  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1997  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1998  */
1999 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
2000 {
2001 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2002 	struct i40e_vsi *vsi = np->vsi;
2003 
2004 	/* Under some circumstances, we might receive a request to delete
2005 	 * our own device address from our uc list. Because we store the
2006 	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
2007 	 * such requests and not delete our device address from this list.
2008 	 */
2009 	if (ether_addr_equal(addr, netdev->dev_addr))
2010 		return 0;
2011 
2012 	i40e_del_mac_filter(vsi, addr);
2013 
2014 	return 0;
2015 }
2016 
2017 /**
2018  * i40e_set_rx_mode - NDO callback to set the netdev filters
2019  * @netdev: network interface device structure
2020  **/
2021 static void i40e_set_rx_mode(struct net_device *netdev)
2022 {
2023 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2024 	struct i40e_vsi *vsi = np->vsi;
2025 
2026 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2027 
2028 	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2029 	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2030 
2031 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2032 
2033 	/* check for other flag changes */
2034 	if (vsi->current_netdev_flags != vsi->netdev->flags) {
2035 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2036 		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2037 	}
2038 }
2039 
2040 /**
2041  * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2042  * @vsi: Pointer to VSI struct
2043  * @from: Pointer to list which contains MAC filter entries - changes to
2044  *        those entries need to be undone.
2045  *
2046  * MAC filter entries from this list were slated for deletion.
2047  **/
2048 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2049 					 struct hlist_head *from)
2050 {
2051 	struct i40e_mac_filter *f;
2052 	struct hlist_node *h;
2053 
2054 	hlist_for_each_entry_safe(f, h, from, hlist) {
2055 		u64 key = i40e_addr_to_hkey(f->macaddr);
2056 
2057 		/* Move the element back into MAC filter list*/
2058 		hlist_del(&f->hlist);
2059 		hash_add(vsi->mac_filter_hash, &f->hlist, key);
2060 	}
2061 }
2062 
2063 /**
2064  * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2065  * @vsi: Pointer to vsi struct
2066  * @from: Pointer to list which contains MAC filter entries - changes to
2067  *        those entries need to be undone.
2068  *
2069  * MAC filter entries from this list were slated for addition.
2070  **/
2071 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2072 					 struct hlist_head *from)
2073 {
2074 	struct i40e_new_mac_filter *new;
2075 	struct hlist_node *h;
2076 
2077 	hlist_for_each_entry_safe(new, h, from, hlist) {
2078 		/* We can simply free the wrapper structure */
2079 		hlist_del(&new->hlist);
2080 		netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2081 		kfree(new);
2082 	}
2083 }
2084 
2085 /**
2086  * i40e_next_filter - Get the next non-broadcast filter from a list
2087  * @next: pointer to filter in list
2088  *
2089  * Returns the next non-broadcast filter in the list. Required so that we
2090  * ignore broadcast filters within the list, since these are not handled via
2091  * the normal firmware update path.
2092  */
2093 static
2094 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2095 {
2096 	hlist_for_each_entry_continue(next, hlist) {
2097 		if (!is_broadcast_ether_addr(next->f->macaddr))
2098 			return next;
2099 	}
2100 
2101 	return NULL;
2102 }
2103 
2104 /**
2105  * i40e_update_filter_state - Update filter state based on return data
2106  * from firmware
2107  * @count: Number of filters added
2108  * @add_list: return data from fw
2109  * @add_head: pointer to first filter in current batch
2110  *
2111  * MAC filter entries from this list were slated to be added to the device.
2112  * Returns the number of successfully added filters. Note that 0 does NOT mean success!
2113  **/
2114 static int
2115 i40e_update_filter_state(int count,
2116 			 struct i40e_aqc_add_macvlan_element_data *add_list,
2117 			 struct i40e_new_mac_filter *add_head)
2118 {
2119 	int retval = 0;
2120 	int i;
2121 
2122 	for (i = 0; i < count; i++) {
2123 		/* Always check status of each filter. We don't need to check
2124 		 * the firmware return status because we pre-set the filter
2125 		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2126 		 * request to the adminq. Thus, if it no longer matches then
2127 		 * we know the filter is active.
2128 		 */
2129 		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2130 			add_head->state = I40E_FILTER_FAILED;
2131 		} else {
2132 			add_head->state = I40E_FILTER_ACTIVE;
2133 			retval++;
2134 		}
2135 
2136 		add_head = i40e_next_filter(add_head);
2137 		if (!add_head)
2138 			break;
2139 	}
2140 
2141 	return retval;
2142 }
2143 
2144 /**
2145  * i40e_aqc_del_filters - Request firmware to delete a set of filters
2146  * @vsi: ptr to the VSI
2147  * @vsi_name: name to display in messages
2148  * @list: the list of filters to send to firmware
2149  * @num_del: the number of filters to delete
2150  * @retval: Set to -EIO on failure to delete
2151  *
2152  * Send a request to firmware via AdminQ to delete a set of filters. Uses
2153  * *retval instead of a return value so that success does not force *retval to
2154  * be set to 0. This ensures that a sequence of calls to this function
2155  * preserves the previous value of *retval on successful delete.
2156  */
2157 static
2158 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2159 			  struct i40e_aqc_remove_macvlan_element_data *list,
2160 			  int num_del, int *retval)
2161 {
2162 	struct i40e_hw *hw = &vsi->back->hw;
2163 	enum i40e_admin_queue_err aq_status;
2164 	i40e_status aq_ret;
2165 
2166 	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2167 					   &aq_status);
2168 
2169 	/* Explicitly ignore and do not report when firmware returns ENOENT */
2170 	if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
2171 		*retval = -EIO;
2172 		dev_info(&vsi->back->pdev->dev,
2173 			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2174 			 vsi_name, i40e_stat_str(hw, aq_ret),
2175 			 i40e_aq_str(hw, aq_status));
2176 	}
2177 }
2178 
2179 /**
2180  * i40e_aqc_add_filters - Request firmware to add a set of filters
2181  * @vsi: ptr to the VSI
2182  * @vsi_name: name to display in messages
2183  * @list: the list of filters to send to firmware
2184  * @add_head: Position in the add hlist
2185  * @num_add: the number of filters to add
2186  *
2187  * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2188  * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2189  * space for more filters.
2190  */
2191 static
2192 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2193 			  struct i40e_aqc_add_macvlan_element_data *list,
2194 			  struct i40e_new_mac_filter *add_head,
2195 			  int num_add)
2196 {
2197 	struct i40e_hw *hw = &vsi->back->hw;
2198 	enum i40e_admin_queue_err aq_status;
2199 	int fcnt;
2200 
2201 	i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2202 	fcnt = i40e_update_filter_state(num_add, list, add_head);
2203 
2204 	if (fcnt != num_add) {
2205 		if (vsi->type == I40E_VSI_MAIN) {
2206 			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2207 			dev_warn(&vsi->back->pdev->dev,
2208 				 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2209 				 i40e_aq_str(hw, aq_status), vsi_name);
2210 		} else if (vsi->type == I40E_VSI_SRIOV ||
2211 			   vsi->type == I40E_VSI_VMDQ1 ||
2212 			   vsi->type == I40E_VSI_VMDQ2) {
2213 			dev_warn(&vsi->back->pdev->dev,
2214 				 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2215 				 i40e_aq_str(hw, aq_status), vsi_name,
2216 					     vsi_name);
2217 		} else {
2218 			dev_warn(&vsi->back->pdev->dev,
2219 				 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2220 				 i40e_aq_str(hw, aq_status), vsi_name,
2221 					     vsi->type);
2222 		}
2223 	}
2224 }
2225 
2226 /**
2227  * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2228  * @vsi: pointer to the VSI
2229  * @vsi_name: the VSI name
2230  * @f: filter data
2231  *
2232  * This function sets or clears the promiscuous broadcast flags for VLAN
2233  * filters in order to properly receive broadcast frames. Assumes that only
2234  * broadcast filters are passed.
2235  *
2236  * Returns status indicating success or failure.
2237  **/
2238 static i40e_status
2239 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2240 			  struct i40e_mac_filter *f)
2241 {
2242 	bool enable = f->state == I40E_FILTER_NEW;
2243 	struct i40e_hw *hw = &vsi->back->hw;
2244 	i40e_status aq_ret;
2245 
2246 	if (f->vlan == I40E_VLAN_ANY) {
2247 		aq_ret = i40e_aq_set_vsi_broadcast(hw,
2248 						   vsi->seid,
2249 						   enable,
2250 						   NULL);
2251 	} else {
2252 		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2253 							    vsi->seid,
2254 							    enable,
2255 							    f->vlan,
2256 							    NULL);
2257 	}
2258 
2259 	if (aq_ret) {
2260 		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2261 		dev_warn(&vsi->back->pdev->dev,
2262 			 "Error %s, forcing overflow promiscuous on %s\n",
2263 			 i40e_aq_str(hw, hw->aq.asq_last_status),
2264 			 vsi_name);
2265 	}
2266 
2267 	return aq_ret;
2268 }
2269 
2270 /**
2271  * i40e_set_promiscuous - set promiscuous mode
2272  * @pf: board private structure
2273  * @promisc: promisc on or off
2274  *
2275  * There are different ways of setting promiscuous mode on a PF depending on
2276  * what state/environment we're in.  This identifies and sets it appropriately.
2277  * Returns 0 on success.
2278  **/
2279 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2280 {
2281 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2282 	struct i40e_hw *hw = &pf->hw;
2283 	i40e_status aq_ret;
2284 
2285 	if (vsi->type == I40E_VSI_MAIN &&
2286 	    pf->lan_veb != I40E_NO_VEB &&
2287 	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2288 		/* set defport ON for Main VSI instead of true promisc
2289 		 * this way we will get all unicast/multicast and VLAN
2290 		 * promisc behavior but will not get VF or VMDq traffic
2291 		 * replicated on the Main VSI.
2292 		 */
2293 		if (promisc)
2294 			aq_ret = i40e_aq_set_default_vsi(hw,
2295 							 vsi->seid,
2296 							 NULL);
2297 		else
2298 			aq_ret = i40e_aq_clear_default_vsi(hw,
2299 							   vsi->seid,
2300 							   NULL);
2301 		if (aq_ret) {
2302 			dev_info(&pf->pdev->dev,
2303 				 "Set default VSI failed, err %s, aq_err %s\n",
2304 				 i40e_stat_str(hw, aq_ret),
2305 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2306 		}
2307 	} else {
2308 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2309 						  hw,
2310 						  vsi->seid,
2311 						  promisc, NULL,
2312 						  true);
2313 		if (aq_ret) {
2314 			dev_info(&pf->pdev->dev,
2315 				 "set unicast promisc failed, err %s, aq_err %s\n",
2316 				 i40e_stat_str(hw, aq_ret),
2317 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2318 		}
2319 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2320 						  hw,
2321 						  vsi->seid,
2322 						  promisc, NULL);
2323 		if (aq_ret) {
2324 			dev_info(&pf->pdev->dev,
2325 				 "set multicast promisc failed, err %s, aq_err %s\n",
2326 				 i40e_stat_str(hw, aq_ret),
2327 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2328 		}
2329 	}
2330 
2331 	if (!aq_ret)
2332 		pf->cur_promisc = promisc;
2333 
2334 	return aq_ret;
2335 }
2336 
2337 /**
2338  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2339  * @vsi: ptr to the VSI
2340  *
2341  * Push any outstanding VSI filter changes through the AdminQ.
2342  *
2343  * Returns 0 or error value
2344  **/
2345 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2346 {
2347 	struct hlist_head tmp_add_list, tmp_del_list;
2348 	struct i40e_mac_filter *f;
2349 	struct i40e_new_mac_filter *new, *add_head = NULL;
2350 	struct i40e_hw *hw = &vsi->back->hw;
2351 	bool old_overflow, new_overflow;
2352 	unsigned int failed_filters = 0;
2353 	unsigned int vlan_filters = 0;
2354 	char vsi_name[16] = "PF";
2355 	int filter_list_len = 0;
2356 	i40e_status aq_ret = 0;
2357 	u32 changed_flags = 0;
2358 	struct hlist_node *h;
2359 	struct i40e_pf *pf;
2360 	int num_add = 0;
2361 	int num_del = 0;
2362 	int retval = 0;
2363 	u16 cmd_flags;
2364 	int list_size;
2365 	int bkt;
2366 
2367 	/* empty array typed pointers, kcalloc later */
2368 	struct i40e_aqc_add_macvlan_element_data *add_list;
2369 	struct i40e_aqc_remove_macvlan_element_data *del_list;
2370 
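	/* allow only one filter sync on this VSI at a time; wait for any
	 * sync already in progress to finish before proceeding
	 */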
2371 	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2372 		usleep_range(1000, 2000);
2373 	pf = vsi->back;
2374 
2375 	old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2376 
2377 	if (vsi->netdev) {
2378 		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2379 		vsi->current_netdev_flags = vsi->netdev->flags;
2380 	}
2381 
2382 	INIT_HLIST_HEAD(&tmp_add_list);
2383 	INIT_HLIST_HEAD(&tmp_del_list);
2384 
2385 	if (vsi->type == I40E_VSI_SRIOV)
2386 		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2387 	else if (vsi->type != I40E_VSI_MAIN)
2388 		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2389 
2390 	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2391 		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2392 
2393 		spin_lock_bh(&vsi->mac_filter_hash_lock);
2394 		/* Create a list of filters to delete. */
2395 		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2396 			if (f->state == I40E_FILTER_REMOVE) {
2397 				/* Move the element into temporary del_list */
2398 				hash_del(&f->hlist);
2399 				hlist_add_head(&f->hlist, &tmp_del_list);
2400 
2401 				/* Avoid counting removed filters */
2402 				continue;
2403 			}
2404 			if (f->state == I40E_FILTER_NEW) {
2405 				/* Create a temporary i40e_new_mac_filter */
2406 				new = kzalloc(sizeof(*new), GFP_ATOMIC);
2407 				if (!new)
2408 					goto err_no_memory_locked;
2409 
2410 				/* Store pointer to the real filter */
2411 				new->f = f;
2412 				new->state = f->state;
2413 
2414 				/* Add it to the hash list */
2415 				hlist_add_head(&new->hlist, &tmp_add_list);
2416 			}
2417 
2418 			/* Count the number of active (current and new) VLAN
2419 			 * filters we have now. Does not count filters which
2420 			 * are marked for deletion.
2421 			 */
2422 			if (f->vlan > 0)
2423 				vlan_filters++;
2424 		}
2425 
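		/* reconcile the remaining filters and the temporary add/del
		 * lists against the number of active VLAN filters counted above
		 */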
2426 		retval = i40e_correct_mac_vlan_filters(vsi,
2427 						       &tmp_add_list,
2428 						       &tmp_del_list,
2429 						       vlan_filters);
2430 
2431 		hlist_for_each_entry(new, &tmp_add_list, hlist)
2432 			netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2433 
2434 		if (retval)
2435 			goto err_no_memory_locked;
2436 
2437 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2438 	}
2439 
2440 	/* Now process 'del_list' outside the lock */
2441 	if (!hlist_empty(&tmp_del_list)) {
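		/* size the delete buffer to the largest number of remove
		 * elements that fit into a single AdminQ command
		 */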
2442 		filter_list_len = hw->aq.asq_buf_size /
2443 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
2444 		list_size = filter_list_len *
2445 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
2446 		del_list = kzalloc(list_size, GFP_ATOMIC);
2447 		if (!del_list)
2448 			goto err_no_memory;
2449 
2450 		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2451 			cmd_flags = 0;
2452 
2453 			/* handle broadcast filters by updating the broadcast
2454 			 * promiscuous flag and releasing the filter entry.
2455 			 */
2456 			if (is_broadcast_ether_addr(f->macaddr)) {
2457 				i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2458 
2459 				hlist_del(&f->hlist);
2460 				kfree(f);
2461 				continue;
2462 			}
2463 
2464 			/* add to delete list */
2465 			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2466 			if (f->vlan == I40E_VLAN_ANY) {
2467 				del_list[num_del].vlan_tag = 0;
2468 				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2469 			} else {
2470 				del_list[num_del].vlan_tag =
2471 					cpu_to_le16((u16)(f->vlan));
2472 			}
2473 
2474 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2475 			del_list[num_del].flags = cmd_flags;
2476 			num_del++;
2477 
2478 			/* flush a full buffer */
2479 			if (num_del == filter_list_len) {
2480 				i40e_aqc_del_filters(vsi, vsi_name, del_list,
2481 						     num_del, &retval);
2482 				memset(del_list, 0, list_size);
2483 				num_del = 0;
2484 			}
2485 			/* Release memory for MAC filter entries which were
2486 			 * synced up with HW.
2487 			 */
2488 			hlist_del(&f->hlist);
2489 			kfree(f);
2490 		}
2491 
2492 		if (num_del) {
2493 			i40e_aqc_del_filters(vsi, vsi_name, del_list,
2494 					     num_del, &retval);
2495 		}
2496 
2497 		kfree(del_list);
2498 		del_list = NULL;
2499 	}
2500 
2501 	if (!hlist_empty(&tmp_add_list)) {
2502 		/* Do all the adds now. */
2503 		filter_list_len = hw->aq.asq_buf_size /
2504 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
2505 		list_size = filter_list_len *
2506 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
2507 		add_list = kzalloc(list_size, GFP_ATOMIC);
2508 		if (!add_list)
2509 			goto err_no_memory;
2510 
2511 		num_add = 0;
2512 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2513 			/* handle broadcast filters by updating the broadcast
2514 			 * promiscuous flag instead of adding a MAC filter.
2515 			 */
2516 			if (is_broadcast_ether_addr(new->f->macaddr)) {
2517 				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2518 							      new->f))
2519 					new->state = I40E_FILTER_FAILED;
2520 				else
2521 					new->state = I40E_FILTER_ACTIVE;
2522 				continue;
2523 			}
2524 
2525 			/* add to add array */
2526 			if (num_add == 0)
2527 				add_head = new;
2528 			cmd_flags = 0;
2529 			ether_addr_copy(add_list[num_add].mac_addr,
2530 					new->f->macaddr);
2531 			if (new->f->vlan == I40E_VLAN_ANY) {
2532 				add_list[num_add].vlan_tag = 0;
2533 				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2534 			} else {
2535 				add_list[num_add].vlan_tag =
2536 					cpu_to_le16((u16)(new->f->vlan));
2537 			}
2538 			add_list[num_add].queue_number = 0;
2539 			/* set invalid match method for later detection */
2540 			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2541 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2542 			add_list[num_add].flags = cpu_to_le16(cmd_flags);
2543 			num_add++;
2544 
2545 			/* flush a full buffer */
2546 			if (num_add == filter_list_len) {
2547 				i40e_aqc_add_filters(vsi, vsi_name, add_list,
2548 						     add_head, num_add);
2549 				memset(add_list, 0, list_size);
2550 				num_add = 0;
2551 			}
2552 		}
2553 		if (num_add) {
2554 			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2555 					     num_add);
2556 		}
2557 		/* Now move all of the filters from the temp add list back to
2558 		 * the VSI's list.
2559 		 */
2560 		spin_lock_bh(&vsi->mac_filter_hash_lock);
2561 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2562 			/* Only update the state if we're still NEW */
2563 			if (new->f->state == I40E_FILTER_NEW)
2564 				new->f->state = new->state;
2565 			hlist_del(&new->hlist);
2566 			netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2567 			kfree(new);
2568 		}
2569 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2570 		kfree(add_list);
2571 		add_list = NULL;
2572 	}
2573 
2574 	/* Determine the number of active and failed filters. */
2575 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2576 	vsi->active_filters = 0;
2577 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2578 		if (f->state == I40E_FILTER_ACTIVE)
2579 			vsi->active_filters++;
2580 		else if (f->state == I40E_FILTER_FAILED)
2581 			failed_filters++;
2582 	}
2583 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2584 
2585 	/* Check if we are able to exit overflow promiscuous mode. We can
2586 	 * safely exit if we didn't just enter, we no longer have any failed
2587 	 * filters, and we have reduced filters below the threshold value.
2588 	 */
2589 	if (old_overflow && !failed_filters &&
2590 	    vsi->active_filters < vsi->promisc_threshold) {
2591 		dev_info(&pf->pdev->dev,
2592 			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2593 			 vsi_name);
2594 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2595 		vsi->promisc_threshold = 0;
2596 	}
2597 
2598 	/* if the VF is not trusted do not do promisc */
2599 	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2600 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2601 		goto out;
2602 	}
2603 
2604 	new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2605 
2606 	/* If we are entering overflow promiscuous, we need to calculate a new
2607 	 * threshold for when we are safe to exit
2608 	 */
2609 	if (!old_overflow && new_overflow)
2610 		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2611 
2612 	/* check for changes in promiscuous modes */
2613 	if (changed_flags & IFF_ALLMULTI) {
2614 		bool cur_multipromisc;
2615 
2616 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2617 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2618 							       vsi->seid,
2619 							       cur_multipromisc,
2620 							       NULL);
2621 		if (aq_ret) {
2622 			retval = i40e_aq_rc_to_posix(aq_ret,
2623 						     hw->aq.asq_last_status);
2624 			dev_info(&pf->pdev->dev,
2625 				 "set multi promisc failed on %s, err %s aq_err %s\n",
2626 				 vsi_name,
2627 				 i40e_stat_str(hw, aq_ret),
2628 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2629 		} else {
2630 			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2631 				 cur_multipromisc ? "entering" : "leaving");
2632 		}
2633 	}
2634 
2635 	if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2636 		bool cur_promisc;
2637 
2638 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2639 			       new_overflow);
2640 		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2641 		if (aq_ret) {
2642 			retval = i40e_aq_rc_to_posix(aq_ret,
2643 						     hw->aq.asq_last_status);
2644 			dev_info(&pf->pdev->dev,
2645 				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2646 				 cur_promisc ? "on" : "off",
2647 				 vsi_name,
2648 				 i40e_stat_str(hw, aq_ret),
2649 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2650 		}
2651 	}
2652 out:
2653 	/* if something went wrong then set the changed flag so we try again */
2654 	if (retval)
2655 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2656 
2657 	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2658 	return retval;
2659 
2660 err_no_memory:
2661 	/* Restore elements on the temporary add and delete lists */
2662 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2663 err_no_memory_locked:
2664 	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2665 	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2666 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2667 
2668 	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2669 	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2670 	return -ENOMEM;
2671 }
2672 
2673 /**
2674  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2675  * @pf: board private structure
2676  **/
2677 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2678 {
2679 	int v;
2680 
2681 	if (!pf)
2682 		return;
2683 	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2684 		return;
2685 	if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2686 		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2687 		return;
2688 	}
2689 
2690 	for (v = 0; v < pf->num_alloc_vsi; v++) {
2691 		if (pf->vsi[v] &&
2692 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2693 		    !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2694 			int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2695 
2696 			if (ret) {
2697 				/* come back and try again later */
2698 				set_bit(__I40E_MACVLAN_SYNC_PENDING,
2699 					pf->state);
2700 				break;
2701 			}
2702 		}
2703 	}
2704 }
2705 
2706 /**
2707  * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2708  * @vsi: the vsi
2709  **/
2710 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2711 {
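	/* XDP frames must fit within a single Rx buffer, so the limit tracks
	 * the Rx buffer size used when XDP is enabled
	 */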
2712 	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2713 		return I40E_RXBUFFER_2048;
2714 	else
2715 		return I40E_RXBUFFER_3072;
2716 }
2717 
2718 /**
2719  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2720  * @netdev: network interface device structure
2721  * @new_mtu: new value for maximum frame size
2722  *
2723  * Returns 0 on success, negative on failure
2724  **/
2725 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2726 {
2727 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2728 	struct i40e_vsi *vsi = np->vsi;
2729 	struct i40e_pf *pf = vsi->back;
2730 
2731 	if (i40e_enabled_xdp_vsi(vsi)) {
2732 		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2733 
2734 		if (frame_size > i40e_max_xdp_frame_size(vsi))
2735 			return -EINVAL;
2736 	}
2737 
2738 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
2739 		   netdev->mtu, new_mtu);
2740 	netdev->mtu = new_mtu;
2741 	if (netif_running(netdev))
2742 		i40e_vsi_reinit_locked(vsi);
2743 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2744 	set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2745 	return 0;
2746 }
2747 
2748 /**
2749  * i40e_ioctl - Access the hwtstamp interface
2750  * @netdev: network interface device structure
2751  * @ifr: interface request data
2752  * @cmd: ioctl command
2753  **/
2754 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2755 {
2756 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2757 	struct i40e_pf *pf = np->vsi->back;
2758 
2759 	switch (cmd) {
2760 	case SIOCGHWTSTAMP:
2761 		return i40e_ptp_get_ts_config(pf, ifr);
2762 	case SIOCSHWTSTAMP:
2763 		return i40e_ptp_set_ts_config(pf, ifr);
2764 	default:
2765 		return -EOPNOTSUPP;
2766 	}
2767 }
2768 
2769 /**
2770  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2771  * @vsi: the vsi being adjusted
2772  **/
2773 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2774 {
2775 	struct i40e_vsi_context ctxt;
2776 	i40e_status ret;
2777 
2778 	/* Don't modify stripping options if a port VLAN is active */
2779 	if (vsi->info.pvid)
2780 		return;
2781 
2782 	if ((vsi->info.valid_sections &
2783 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2784 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2785 		return;  /* already enabled */
2786 
2787 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2788 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2789 				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2790 
2791 	ctxt.seid = vsi->seid;
2792 	ctxt.info = vsi->info;
2793 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2794 	if (ret) {
2795 		dev_info(&vsi->back->pdev->dev,
2796 			 "update vlan stripping failed, err %s aq_err %s\n",
2797 			 i40e_stat_str(&vsi->back->hw, ret),
2798 			 i40e_aq_str(&vsi->back->hw,
2799 				     vsi->back->hw.aq.asq_last_status));
2800 	}
2801 }
2802 
2803 /**
2804  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2805  * @vsi: the vsi being adjusted
2806  **/
2807 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2808 {
2809 	struct i40e_vsi_context ctxt;
2810 	i40e_status ret;
2811 
2812 	/* Don't modify stripping options if a port VLAN is active */
2813 	if (vsi->info.pvid)
2814 		return;
2815 
2816 	if ((vsi->info.valid_sections &
2817 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2818 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2819 	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
2820 		return;  /* already disabled */
2821 
2822 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2823 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2824 				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2825 
2826 	ctxt.seid = vsi->seid;
2827 	ctxt.info = vsi->info;
2828 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2829 	if (ret) {
2830 		dev_info(&vsi->back->pdev->dev,
2831 			 "update vlan stripping failed, err %s aq_err %s\n",
2832 			 i40e_stat_str(&vsi->back->hw, ret),
2833 			 i40e_aq_str(&vsi->back->hw,
2834 				     vsi->back->hw.aq.asq_last_status));
2835 	}
2836 }
2837 
2838 /**
2839  * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2840  * @vsi: the vsi being configured
2841  * @vid: vlan id to be added (0 = untagged only, -1 = any)
2842  *
2843  * This is a helper function for adding a new MAC/VLAN filter with the
2844  * specified VLAN for each existing MAC address already in the hash table.
2845  * This function does *not* perform any accounting to update filters based on
2846  * VLAN mode.
2847  *
2848  * NOTE: this function expects to be called while under the
2849  * mac_filter_hash_lock
2850  **/
2851 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2852 {
2853 	struct i40e_mac_filter *f, *add_f;
2854 	struct hlist_node *h;
2855 	int bkt;
2856 
2857 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2858 		if (f->state == I40E_FILTER_REMOVE)
2859 			continue;
2860 		add_f = i40e_add_filter(vsi, f->macaddr, vid);
2861 		if (!add_f) {
2862 			dev_info(&vsi->back->pdev->dev,
2863 				 "Could not add vlan filter %d for %pM\n",
2864 				 vid, f->macaddr);
2865 			return -ENOMEM;
2866 		}
2867 	}
2868 
2869 	return 0;
2870 }
2871 
2872 /**
2873  * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2874  * @vsi: the VSI being configured
2875  * @vid: VLAN id to be added
2876  **/
2877 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2878 {
2879 	int err;
2880 
2881 	if (vsi->info.pvid)
2882 		return -EINVAL;
2883 
2884 	/* The network stack will attempt to add VID=0, with the intention to
2885 	 * receive priority tagged packets with a VLAN of 0. Our HW receives
2886 	 * these packets by default when configured to receive untagged
2887 	 * packets, so we don't need to add a filter for this case.
2888 	 * Additionally, HW interprets adding a VID=0 filter as meaning to
2889 	 * receive *only* tagged traffic and stops receiving untagged traffic.
2890 	 * Thus, we do not want to actually add a filter for VID=0
2891 	 */
2892 	if (!vid)
2893 		return 0;
2894 
2895 	/* Lock once because all functions invoked below iterate the list */
2896 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2897 	err = i40e_add_vlan_all_mac(vsi, vid);
2898 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2899 	if (err)
2900 		return err;
2901 
2902 	/* schedule our worker thread which will take care of
2903 	 * applying the new filter changes
2904 	 */
2905 	i40e_service_event_schedule(vsi->back);
2906 	return 0;
2907 }
2908 
2909 /**
2910  * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2911  * @vsi: the vsi being configured
2912  * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2913  *
2914  * This function should be used to remove all VLAN filters which match the
2915  * given VID. It does not schedule the service event and does not take the
2916  * mac_filter_hash_lock so it may be combined with other operations under
2917  * a single invocation of the mac_filter_hash_lock.
2918  *
2919  * NOTE: this function expects to be called while under the
2920  * mac_filter_hash_lock
2921  */
2922 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2923 {
2924 	struct i40e_mac_filter *f;
2925 	struct hlist_node *h;
2926 	int bkt;
2927 
2928 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2929 		if (f->vlan == vid)
2930 			__i40e_del_filter(vsi, f);
2931 	}
2932 }
2933 
2934 /**
2935  * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2936  * @vsi: the VSI being configured
2937  * @vid: VLAN id to be removed
2938  **/
2939 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2940 {
2941 	if (!vid || vsi->info.pvid)
2942 		return;
2943 
2944 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2945 	i40e_rm_vlan_all_mac(vsi, vid);
2946 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2947 
2948 	/* schedule our worker thread which will take care of
2949 	 * applying the new filter changes
2950 	 */
2951 	i40e_service_event_schedule(vsi->back);
2952 }
2953 
2954 /**
2955  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2956  * @netdev: network interface to be adjusted
2957  * @proto: unused protocol value
2958  * @vid: vlan id to be added
2959  *
2960  * net_device_ops implementation for adding vlan ids
2961  **/
2962 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2963 				__always_unused __be16 proto, u16 vid)
2964 {
2965 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2966 	struct i40e_vsi *vsi = np->vsi;
2967 	int ret = 0;
2968 
2969 	if (vid >= VLAN_N_VID)
2970 		return -EINVAL;
2971 
2972 	ret = i40e_vsi_add_vlan(vsi, vid);
2973 	if (!ret)
2974 		set_bit(vid, vsi->active_vlans);
2975 
2976 	return ret;
2977 }
2978 
2979 /**
2980  * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2981  * @netdev: network interface to be adjusted
2982  * @proto: unused protocol value
2983  * @vid: vlan id to be added
2984  **/
2985 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2986 				    __always_unused __be16 proto, u16 vid)
2987 {
2988 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2989 	struct i40e_vsi *vsi = np->vsi;
2990 
2991 	if (vid >= VLAN_N_VID)
2992 		return;
2993 	set_bit(vid, vsi->active_vlans);
2994 }
2995 
2996 /**
2997  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2998  * @netdev: network interface to be adjusted
2999  * @proto: unused protocol value
3000  * @vid: vlan id to be removed
3001  *
3002  * net_device_ops implementation for removing vlan ids
3003  **/
3004 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3005 				 __always_unused __be16 proto, u16 vid)
3006 {
3007 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3008 	struct i40e_vsi *vsi = np->vsi;
3009 
3010 	/* the return code is ignored as there is nothing a user
3011 	 * can do about a failure to remove, and a log message was
3012 	 * already printed by the other function
3013 	 */
3014 	i40e_vsi_kill_vlan(vsi, vid);
3015 
3016 	clear_bit(vid, vsi->active_vlans);
3017 
3018 	return 0;
3019 }
3020 
3021 /**
3022  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3023  * @vsi: the vsi being brought back up
3024  **/
3025 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3026 {
3027 	u16 vid;
3028 
3029 	if (!vsi->netdev)
3030 		return;
3031 
3032 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3033 		i40e_vlan_stripping_enable(vsi);
3034 	else
3035 		i40e_vlan_stripping_disable(vsi);
3036 
3037 	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3038 		i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3039 					vid);
3040 }
3041 
3042 /**
3043  * i40e_vsi_add_pvid - Add pvid for the VSI
3044  * @vsi: the vsi being adjusted
3045  * @vid: the vlan id to set as a PVID
3046  **/
3047 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3048 {
3049 	struct i40e_vsi_context ctxt;
3050 	i40e_status ret;
3051 
3052 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3053 	vsi->info.pvid = cpu_to_le16(vid);
3054 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3055 				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
3056 				    I40E_AQ_VSI_PVLAN_EMOD_STR;
3057 
3058 	ctxt.seid = vsi->seid;
3059 	ctxt.info = vsi->info;
3060 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3061 	if (ret) {
3062 		dev_info(&vsi->back->pdev->dev,
3063 			 "add pvid failed, err %s aq_err %s\n",
3064 			 i40e_stat_str(&vsi->back->hw, ret),
3065 			 i40e_aq_str(&vsi->back->hw,
3066 				     vsi->back->hw.aq.asq_last_status));
3067 		return -ENOENT;
3068 	}
3069 
3070 	return 0;
3071 }
3072 
3073 /**
3074  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3075  * @vsi: the vsi being adjusted
3076  *
3077  * Just clear the PVID and disable VLAN stripping to put the VSI back to normal
3078  **/
3079 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3080 {
3081 	vsi->info.pvid = 0;
3082 
3083 	i40e_vlan_stripping_disable(vsi);
3084 }
3085 
3086 /**
3087  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3088  * @vsi: ptr to the VSI
3089  *
3090  * If this function returns with an error, then it's possible one or
3091  * more of the rings is populated (while the rest are not).  It is the
3092  * caller's duty to clean those orphaned rings.
3093  *
3094  * Return 0 on success, negative on failure
3095  **/
3096 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3097 {
3098 	int i, err = 0;
3099 
3100 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3101 		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3102 
3103 	if (!i40e_enabled_xdp_vsi(vsi))
3104 		return err;
3105 
3106 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3107 		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3108 
3109 	return err;
3110 }
3111 
3112 /**
3113  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3114  * @vsi: ptr to the VSI
3115  *
3116  * Free VSI's transmit software resources
3117  **/
3118 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3119 {
3120 	int i;
3121 
3122 	if (vsi->tx_rings) {
3123 		for (i = 0; i < vsi->num_queue_pairs; i++)
3124 			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3125 				i40e_free_tx_resources(vsi->tx_rings[i]);
3126 	}
3127 
3128 	if (vsi->xdp_rings) {
3129 		for (i = 0; i < vsi->num_queue_pairs; i++)
3130 			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3131 				i40e_free_tx_resources(vsi->xdp_rings[i]);
3132 	}
3133 }
3134 
3135 /**
3136  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3137  * @vsi: ptr to the VSI
3138  *
3139  * If this function returns with an error, then it's possible one or
3140  * more of the rings is populated (while the rest are not).  It is the
3141  * caller's duty to clean those orphaned rings.
3142  *
3143  * Return 0 on success, negative on failure
3144  **/
3145 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3146 {
3147 	int i, err = 0;
3148 
3149 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3150 		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3151 	return err;
3152 }
3153 
3154 /**
3155  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3156  * @vsi: ptr to the VSI
3157  *
3158  * Free all receive software resources
3159  **/
3160 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3161 {
3162 	int i;
3163 
3164 	if (!vsi->rx_rings)
3165 		return;
3166 
3167 	for (i = 0; i < vsi->num_queue_pairs; i++)
3168 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3169 			i40e_free_rx_resources(vsi->rx_rings[i]);
3170 }
3171 
3172 /**
3173  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3174  * @ring: The Tx ring to configure
3175  *
3176  * This enables/disables XPS for a given Tx descriptor ring
3177  * based on the TCs enabled for the VSI that ring belongs to.
3178  **/
3179 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3180 {
3181 	int cpu;
3182 
3183 	if (!ring->q_vector || !ring->netdev || ring->ch)
3184 		return;
3185 
3186 	/* We only initialize XPS once, so as not to overwrite user settings */
3187 	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3188 		return;
3189 
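	/* map this queue's vector index onto an online CPU for XPS */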
3190 	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3191 	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3192 			    ring->queue_index);
3193 }
3194 
3195 /**
3196  * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3197  * @ring: The Tx or Rx ring
3198  *
3199  * Returns the AF_XDP buffer pool or NULL.
3200  **/
3201 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3202 {
3203 	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3204 	int qid = ring->queue_index;
3205 
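	/* XDP Tx rings are indexed after the regular queue pairs, so translate
	 * an XDP ring back to its queue pair id before the pool lookup
	 */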
3206 	if (ring_is_xdp(ring))
3207 		qid -= ring->vsi->alloc_queue_pairs;
3208 
3209 	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3210 		return NULL;
3211 
3212 	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3213 }
3214 
3215 /**
3216  * i40e_configure_tx_ring - Configure a transmit ring context
3217  * @ring: The Tx ring to configure
3218  *
3219  * Configure the Tx descriptor ring in the HMC context.
3220  **/
3221 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3222 {
3223 	struct i40e_vsi *vsi = ring->vsi;
3224 	u16 pf_q = vsi->base_queue + ring->queue_index;
3225 	struct i40e_hw *hw = &vsi->back->hw;
3226 	struct i40e_hmc_obj_txq tx_ctx;
3227 	i40e_status err = 0;
3228 	u32 qtx_ctl = 0;
3229 
3230 	if (ring_is_xdp(ring))
3231 		ring->xsk_pool = i40e_xsk_pool(ring);
3232 
3233 	/* some ATR related tx ring init */
3234 	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3235 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
3236 		ring->atr_count = 0;
3237 	} else {
3238 		ring->atr_sample_rate = 0;
3239 	}
3240 
3241 	/* configure XPS */
3242 	i40e_config_xps_tx_ring(ring);
3243 
3244 	/* clear the context structure first */
3245 	memset(&tx_ctx, 0, sizeof(tx_ctx));
3246 
3247 	tx_ctx.new_context = 1;
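	/* the queue context takes the ring base address in 128-byte units */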
3248 	tx_ctx.base = (ring->dma / 128);
3249 	tx_ctx.qlen = ring->count;
3250 	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3251 					       I40E_FLAG_FD_ATR_ENABLED));
3252 	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3253 	/* FDIR VSI tx ring can still use RS bit and writebacks */
3254 	if (vsi->type != I40E_VSI_FDIR)
3255 		tx_ctx.head_wb_ena = 1;
3256 	tx_ctx.head_wb_addr = ring->dma +
3257 			      (ring->count * sizeof(struct i40e_tx_desc));
3258 
3259 	/* As part of VSI creation/update, FW allocates certain
3260 	 * Tx arbitration queue sets for each TC enabled for
3261 	 * the VSI. The FW returns the handles to these queue
3262 	 * sets as part of the response buffer to Add VSI,
3263 	 * Update VSI, etc. AQ commands. It is expected that
3264 	 * these queue set handles be associated with the Tx
3265 	 * queues by the driver as part of the TX queue context
3266 	 * initialization. This has to be done regardless of
3267 	 * DCB as by default everything is mapped to TC0.
3268 	 */
3269 
3270 	if (ring->ch)
3271 		tx_ctx.rdylist =
3272 			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3273 
3274 	else
3275 		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3276 
3277 	tx_ctx.rdylist_act = 0;
3278 
3279 	/* clear the context in the HMC */
3280 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3281 	if (err) {
3282 		dev_info(&vsi->back->pdev->dev,
3283 			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3284 			 ring->queue_index, pf_q, err);
3285 		return -ENOMEM;
3286 	}
3287 
3288 	/* set the context in the HMC */
3289 	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3290 	if (err) {
3291 		dev_info(&vsi->back->pdev->dev,
3292 			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3293 			 ring->queue_index, pf_q, err);
3294 		return -ENOMEM;
3295 	}
3296 
3297 	/* Now associate this queue with this PCI function */
3298 	if (ring->ch) {
3299 		if (ring->ch->type == I40E_VSI_VMDQ2)
3300 			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3301 		else
3302 			return -EINVAL;
3303 
3304 		qtx_ctl |= (ring->ch->vsi_number <<
3305 			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3306 			    I40E_QTX_CTL_VFVM_INDX_MASK;
3307 	} else {
3308 		if (vsi->type == I40E_VSI_VMDQ2) {
3309 			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3310 			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3311 				    I40E_QTX_CTL_VFVM_INDX_MASK;
3312 		} else {
3313 			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3314 		}
3315 	}
3316 
3317 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3318 		    I40E_QTX_CTL_PF_INDX_MASK);
3319 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3320 	i40e_flush(hw);
3321 
	/* cache the tail register address for easier writes later */
3323 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3324 
3325 	return 0;
3326 }
3327 
3328 /**
3329  * i40e_rx_offset - Return expected offset into page to access data
3330  * @rx_ring: Ring we are requesting offset of
3331  *
3332  * Returns the offset value for ring into the data buffer.
 **/
3334 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3335 {
3336 	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3337 }
3338 
3339 /**
3340  * i40e_configure_rx_ring - Configure a receive ring context
3341  * @ring: The Rx ring to configure
3342  *
3343  * Configure the Rx descriptor ring in the HMC context.
3344  **/
3345 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3346 {
3347 	struct i40e_vsi *vsi = ring->vsi;
3348 	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3349 	u16 pf_q = vsi->base_queue + ring->queue_index;
3350 	struct i40e_hw *hw = &vsi->back->hw;
3351 	struct i40e_hmc_obj_rxq rx_ctx;
3352 	i40e_status err = 0;
3353 	bool ok;
3354 	int ret;
3355 
3356 	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3357 
3358 	/* clear the context structure first */
3359 	memset(&rx_ctx, 0, sizeof(rx_ctx));
3360 
3361 	if (ring->vsi->type == I40E_VSI_MAIN)
3362 		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3363 
3364 	kfree(ring->rx_bi);
3365 	ring->xsk_pool = i40e_xsk_pool(ring);
3366 	if (ring->xsk_pool) {
3367 		ret = i40e_alloc_rx_bi_zc(ring);
3368 		if (ret)
3369 			return ret;
3370 		ring->rx_buf_len =
3371 		  xsk_pool_get_rx_frame_size(ring->xsk_pool);
		/* For AF_XDP ZC, packets are not allowed to span
		 * multiple buffers, which lets us skip that
		 * handling in the fast path.
3375 		 */
3376 		chain_len = 1;
3377 		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3378 						 MEM_TYPE_XSK_BUFF_POOL,
3379 						 NULL);
3380 		if (ret)
3381 			return ret;
3382 		dev_info(&vsi->back->pdev->dev,
3383 			 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3384 			 ring->queue_index);
3385 
3386 	} else {
3387 		ret = i40e_alloc_rx_bi(ring);
3388 		if (ret)
3389 			return ret;
3390 		ring->rx_buf_len = vsi->rx_buf_len;
3391 		if (ring->vsi->type == I40E_VSI_MAIN) {
3392 			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3393 							 MEM_TYPE_PAGE_SHARED,
3394 							 NULL);
3395 			if (ret)
3396 				return ret;
3397 		}
3398 	}
3399 
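	/* dbuff is expressed in units of 1 << I40E_RXQ_CTX_DBUFF_SHIFT bytes */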
3400 	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3401 				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3402 
3403 	rx_ctx.base = (ring->dma / 128);
3404 	rx_ctx.qlen = ring->count;
3405 
3406 	/* use 16 byte descriptors */
3407 	rx_ctx.dsize = 0;
3408 
3409 	/* descriptor type is always zero
3410 	 * rx_ctx.dtype = 0;
3411 	 */
3412 	rx_ctx.hsplit_0 = 0;
3413 
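	/* cap the max frame size to what a full buffer chain can hold */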
3414 	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3415 	if (hw->revision_id == 0)
3416 		rx_ctx.lrxqthresh = 0;
3417 	else
3418 		rx_ctx.lrxqthresh = 1;
3419 	rx_ctx.crcstrip = 1;
3420 	rx_ctx.l2tsel = 1;
3421 	/* this controls whether VLAN is stripped from inner headers */
3422 	rx_ctx.showiv = 0;
3423 	/* set the prefena field to 1 because the manual says to */
3424 	rx_ctx.prefena = 1;
3425 
3426 	/* clear the context in the HMC */
3427 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3428 	if (err) {
3429 		dev_info(&vsi->back->pdev->dev,
3430 			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3431 			 ring->queue_index, pf_q, err);
3432 		return -ENOMEM;
3433 	}
3434 
3435 	/* set the context in the HMC */
3436 	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3437 	if (err) {
3438 		dev_info(&vsi->back->pdev->dev,
3439 			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3440 			 ring->queue_index, pf_q, err);
3441 		return -ENOMEM;
3442 	}
3443 
3444 	/* configure Rx buffer alignment */
3445 	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3446 		clear_ring_build_skb_enabled(ring);
3447 	else
3448 		set_ring_build_skb_enabled(ring);
3449 
3450 	ring->rx_offset = i40e_rx_offset(ring);
3451 
3452 	/* cache tail for quicker writes, and clear the reg before use */
3453 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3454 	writel(0, ring->tail);
3455 
3456 	if (ring->xsk_pool) {
3457 		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3458 		ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3459 	} else {
3460 		ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3461 	}
3462 	if (!ok) {
		/* Log this in case the user has not yet given the kernel
		 * any buffers, which can also happen later on, once the
		 * application is running.
		 */
3466 		dev_info(&vsi->back->pdev->dev,
3467 			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3468 			 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3469 			 ring->queue_index, pf_q);
3470 	}
3471 
3472 	return 0;
3473 }
3474 
3475 /**
3476  * i40e_vsi_configure_tx - Configure the VSI for Tx
3477  * @vsi: VSI structure describing this set of rings and resources
3478  *
3479  * Configure the Tx VSI for operation.
3480  **/
3481 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3482 {
3483 	int err = 0;
3484 	u16 i;
3485 
3486 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3487 		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3488 
3489 	if (err || !i40e_enabled_xdp_vsi(vsi))
3490 		return err;
3491 
3492 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3493 		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3494 
3495 	return err;
3496 }
3497 
3498 /**
3499  * i40e_vsi_configure_rx - Configure the VSI for Rx
3500  * @vsi: the VSI being configured
3501  *
3502  * Configure the Rx VSI for operation.
3503  **/
3504 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3505 {
3506 	int err = 0;
3507 	u16 i;
3508 
3509 	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3510 		vsi->max_frame = I40E_MAX_RXBUFFER;
3511 		vsi->rx_buf_len = I40E_RXBUFFER_2048;
3512 #if (PAGE_SIZE < 8192)
3513 	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3514 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3515 		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3516 		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3517 #endif
3518 	} else {
3519 		vsi->max_frame = I40E_MAX_RXBUFFER;
3520 		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3521 						       I40E_RXBUFFER_2048;
3522 	}
3523 
3524 	/* set up individual rings */
3525 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3526 		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3527 
3528 	return err;
3529 }
3530 
3531 /**
3532  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3533  * @vsi: ptr to the VSI
3534  **/
3535 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3536 {
3537 	struct i40e_ring *tx_ring, *rx_ring;
3538 	u16 qoffset, qcount;
3539 	int i, n;
3540 
3541 	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3542 		/* Reset the TC information */
3543 		for (i = 0; i < vsi->num_queue_pairs; i++) {
3544 			rx_ring = vsi->rx_rings[i];
3545 			tx_ring = vsi->tx_rings[i];
3546 			rx_ring->dcb_tc = 0;
3547 			tx_ring->dcb_tc = 0;
3548 		}
3549 		return;
3550 	}
3551 
3552 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3553 		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3554 			continue;
3555 
3556 		qoffset = vsi->tc_config.tc_info[n].qoffset;
3557 		qcount = vsi->tc_config.tc_info[n].qcount;
3558 		for (i = qoffset; i < (qoffset + qcount); i++) {
3559 			rx_ring = vsi->rx_rings[i];
3560 			tx_ring = vsi->tx_rings[i];
3561 			rx_ring->dcb_tc = n;
3562 			tx_ring->dcb_tc = n;
3563 		}
3564 	}
3565 }
3566 
3567 /**
3568  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3569  * @vsi: ptr to the VSI
3570  **/
3571 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3572 {
3573 	if (vsi->netdev)
3574 		i40e_set_rx_mode(vsi->netdev);
3575 }
3576 
3577 /**
3578  * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3579  * @pf: Pointer to the targeted PF
3580  *
3581  * Set all flow director counters to 0.
 **/
3583 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3584 {
3585 	pf->fd_tcp4_filter_cnt = 0;
3586 	pf->fd_udp4_filter_cnt = 0;
3587 	pf->fd_sctp4_filter_cnt = 0;
3588 	pf->fd_ip4_filter_cnt = 0;
3589 	pf->fd_tcp6_filter_cnt = 0;
3590 	pf->fd_udp6_filter_cnt = 0;
3591 	pf->fd_sctp6_filter_cnt = 0;
3592 	pf->fd_ip6_filter_cnt = 0;
3593 }
3594 
3595 /**
3596  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3597  * @vsi: Pointer to the targeted VSI
3598  *
 * This function replays the saved hlist of sideband Flow Director filters,
 * re-programming each one into the hardware.
3601  **/
3602 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3603 {
3604 	struct i40e_fdir_filter *filter;
3605 	struct i40e_pf *pf = vsi->back;
3606 	struct hlist_node *node;
3607 
3608 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3609 		return;
3610 
3611 	/* Reset FDir counters as we're replaying all existing filters */
3612 	i40e_reset_fdir_filter_cnt(pf);
3613 
3614 	hlist_for_each_entry_safe(filter, node,
3615 				  &pf->fdir_filter_list, fdir_node) {
3616 		i40e_add_del_fdir(vsi, filter, true);
3617 	}
3618 }
3619 
3620 /**
3621  * i40e_vsi_configure - Set up the VSI for action
3622  * @vsi: the VSI being configured
3623  **/
3624 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3625 {
3626 	int err;
3627 
3628 	i40e_set_vsi_rx_mode(vsi);
3629 	i40e_restore_vlan(vsi);
3630 	i40e_vsi_config_dcb_rings(vsi);
3631 	err = i40e_vsi_configure_tx(vsi);
3632 	if (!err)
3633 		err = i40e_vsi_configure_rx(vsi);
3634 
3635 	return err;
3636 }
3637 
3638 /**
3639  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3640  * @vsi: the VSI being configured
3641  **/
3642 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3643 {
3644 	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3645 	struct i40e_pf *pf = vsi->back;
3646 	struct i40e_hw *hw = &pf->hw;
3647 	u16 vector;
3648 	int i, q;
3649 	u32 qp;
3650 
3651 	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
3652 	 * and PFINT_LNKLSTn registers, e.g.:
3653 	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
3654 	 */
3655 	qp = vsi->base_queue;
3656 	vector = vsi->base_vector;
3657 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3658 		struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3659 
3660 		q_vector->rx.next_update = jiffies + 1;
3661 		q_vector->rx.target_itr =
3662 			ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
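		/* itr_setting is in usecs; the ITR registers count in
		 * 2 usec units, hence the shift
		 */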
3663 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3664 		     q_vector->rx.target_itr >> 1);
3665 		q_vector->rx.current_itr = q_vector->rx.target_itr;
3666 
3667 		q_vector->tx.next_update = jiffies + 1;
3668 		q_vector->tx.target_itr =
3669 			ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3670 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3671 		     q_vector->tx.target_itr >> 1);
3672 		q_vector->tx.current_itr = q_vector->tx.target_itr;
3673 
3674 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
3675 		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3676 
3677 		/* Linked list for the queuepairs assigned to this vector */
3678 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3679 		for (q = 0; q < q_vector->num_ringpairs; q++) {
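			/* XDP Tx queues are laid out after the regular
			 * queues, hence the alloc_queue_pairs offset
			 */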
3680 			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3681 			u32 val;
3682 
3683 			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3684 			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3685 			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3686 			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3687 			      (I40E_QUEUE_TYPE_TX <<
3688 			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3689 
3690 			wr32(hw, I40E_QINT_RQCTL(qp), val);
3691 
3692 			if (has_xdp) {
3693 				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3694 				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3695 				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3696 				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3697 				      (I40E_QUEUE_TYPE_TX <<
3698 				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3699 
3700 				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3701 			}
3702 
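			/* the regular Tx cause chains on to the Rx queue
			 * of the next queue pair
			 */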
3703 			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3704 			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3705 			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3706 			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3707 			      (I40E_QUEUE_TYPE_RX <<
3708 			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3709 
3710 			/* Terminate the linked list */
3711 			if (q == (q_vector->num_ringpairs - 1))
3712 				val |= (I40E_QUEUE_END_OF_LIST <<
3713 					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3714 
3715 			wr32(hw, I40E_QINT_TQCTL(qp), val);
3716 			qp++;
3717 		}
3718 	}
3719 
3720 	i40e_flush(hw);
3721 }
3722 
3723 /**
3724  * i40e_enable_misc_int_causes - enable the non-queue interrupts
3725  * @pf: pointer to private device data structure
3726  **/
3727 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3728 {
3729 	struct i40e_hw *hw = &pf->hw;
3730 	u32 val;
3731 
3732 	/* clear things first */
3733 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
3734 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
3735 
3736 	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
3737 	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
3738 	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
3739 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3740 	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
3741 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
3742 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
3743 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3744 
3745 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3746 		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3747 
3748 	if (pf->flags & I40E_FLAG_PTP)
3749 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3750 
3751 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
3752 
3753 	/* SW_ITR_IDX = 0, but don't change INTENA */
3754 	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3755 					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3756 
3757 	/* OTHER_ITR_IDX = 0 */
3758 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3759 }
3760 
3761 /**
3762  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3763  * @vsi: the VSI being configured
3764  **/
3765 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3766 {
3767 	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3768 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3769 	struct i40e_pf *pf = vsi->back;
3770 	struct i40e_hw *hw = &pf->hw;
3771 	u32 val;
3772 
3773 	/* set the ITR configuration */
3774 	q_vector->rx.next_update = jiffies + 1;
3775 	q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3776 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3777 	q_vector->rx.current_itr = q_vector->rx.target_itr;
3778 	q_vector->tx.next_update = jiffies + 1;
3779 	q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3780 	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3781 	q_vector->tx.current_itr = q_vector->tx.target_itr;
3782 
3783 	i40e_enable_misc_int_causes(pf);
3784 
3785 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3786 	wr32(hw, I40E_PFINT_LNKLST0, 0);
3787 
3788 	/* Associate the queue pair to the vector and enable the queue int */
3789 	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		       |
3790 	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
3791 	      (nextqp	   << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3793 
3794 	wr32(hw, I40E_QINT_RQCTL(0), val);
3795 
3796 	if (i40e_enabled_xdp_vsi(vsi)) {
3797 		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		     |
3798 		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3799 		      (I40E_QUEUE_TYPE_TX
3800 		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3801 
3802 		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3803 	}
3804 
3805 	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
3806 	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3807 	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3808 
3809 	wr32(hw, I40E_QINT_TQCTL(0), val);
3810 	i40e_flush(hw);
3811 }
3812 
3813 /**
3814  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3815  * @pf: board private structure
3816  **/
3817 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3818 {
3819 	struct i40e_hw *hw = &pf->hw;
3820 
3821 	wr32(hw, I40E_PFINT_DYN_CTL0,
3822 	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3823 	i40e_flush(hw);
3824 }
3825 
3826 /**
3827  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3828  * @pf: board private structure
3829  **/
3830 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3831 {
3832 	struct i40e_hw *hw = &pf->hw;
3833 	u32 val;
3834 
3835 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3836 	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3837 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3838 
3839 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
3840 	i40e_flush(hw);
3841 }
3842 
3843 /**
3844  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3845  * @irq: interrupt number
3846  * @data: pointer to a q_vector
3847  **/
3848 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3849 {
3850 	struct i40e_q_vector *q_vector = data;
3851 
3852 	if (!q_vector->tx.ring && !q_vector->rx.ring)
3853 		return IRQ_HANDLED;
3854 
3855 	napi_schedule_irqoff(&q_vector->napi);
3856 
3857 	return IRQ_HANDLED;
3858 }
3859 
3860 /**
3861  * i40e_irq_affinity_notify - Callback for affinity changes
3862  * @notify: context as to what irq was changed
3863  * @mask: the new affinity mask
3864  *
3865  * This is a callback function used by the irq_set_affinity_notifier function
3866  * so that we may register to receive changes to the irq affinity masks.
3867  **/
3868 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3869 				     const cpumask_t *mask)
3870 {
3871 	struct i40e_q_vector *q_vector =
3872 		container_of(notify, struct i40e_q_vector, affinity_notify);
3873 
3874 	cpumask_copy(&q_vector->affinity_mask, mask);
3875 }
3876 
3877 /**
3878  * i40e_irq_affinity_release - Callback for affinity notifier release
3879  * @ref: internal core kernel usage
3880  *
3881  * This is a callback function used by the irq_set_affinity_notifier function
3882  * to inform the current notification subscriber that they will no longer
3883  * receive notifications.
3884  **/
3885 static void i40e_irq_affinity_release(struct kref *ref) {}
3886 
3887 /**
3888  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3889  * @vsi: the VSI being configured
3890  * @basename: name for the vector
3891  *
 * Requests an interrupt from the kernel for each of the VSI's previously
 * allocated MSI-X vectors and sets up the IRQ affinity hints.
3893  **/
3894 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3895 {
3896 	int q_vectors = vsi->num_q_vectors;
3897 	struct i40e_pf *pf = vsi->back;
3898 	int base = vsi->base_vector;
3899 	int rx_int_idx = 0;
3900 	int tx_int_idx = 0;
3901 	int vector, err;
3902 	int irq_num;
3903 	int cpu;
3904 
3905 	for (vector = 0; vector < q_vectors; vector++) {
3906 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3907 
3908 		irq_num = pf->msix_entries[base + vector].vector;
3909 
3910 		if (q_vector->tx.ring && q_vector->rx.ring) {
3911 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3912 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3913 			tx_int_idx++;
3914 		} else if (q_vector->rx.ring) {
3915 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3916 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
3917 		} else if (q_vector->tx.ring) {
3918 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3919 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
3920 		} else {
3921 			/* skip this unused q_vector */
3922 			continue;
3923 		}
3924 		err = request_irq(irq_num,
3925 				  vsi->irq_handler,
3926 				  0,
3927 				  q_vector->name,
3928 				  q_vector);
3929 		if (err) {
3930 			dev_info(&pf->pdev->dev,
3931 				 "MSIX request_irq failed, error: %d\n", err);
3932 			goto free_queue_irqs;
3933 		}
3934 
3935 		/* register for affinity change notifications */
3936 		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3937 		q_vector->affinity_notify.release = i40e_irq_affinity_release;
3938 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3939 		/* Spread affinity hints out across online CPUs.
3940 		 *
3941 		 * get_cpu_mask returns a static constant mask with
3942 		 * a permanent lifetime so it's ok to pass to
3943 		 * irq_update_affinity_hint without making a copy.
3944 		 */
3945 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
3946 		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
3947 	}
3948 
3949 	vsi->irqs_ready = true;
3950 	return 0;
3951 
3952 free_queue_irqs:
3953 	while (vector) {
3954 		vector--;
3955 		irq_num = pf->msix_entries[base + vector].vector;
3956 		irq_set_affinity_notifier(irq_num, NULL);
3957 		irq_update_affinity_hint(irq_num, NULL);
3958 		free_irq(irq_num, &vsi->q_vectors[vector]);
3959 	}
3960 	return err;
3961 }
3962 
3963 /**
3964  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3965  * @vsi: the VSI being un-configured
3966  **/
3967 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3968 {
3969 	struct i40e_pf *pf = vsi->back;
3970 	struct i40e_hw *hw = &pf->hw;
3971 	int base = vsi->base_vector;
3972 	int i;
3973 
3974 	/* disable interrupt causation from each queue */
3975 	for (i = 0; i < vsi->num_queue_pairs; i++) {
3976 		u32 val;
3977 
3978 		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3979 		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3980 		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3981 
3982 		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3983 		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3984 		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3985 
3986 		if (!i40e_enabled_xdp_vsi(vsi))
3987 			continue;
3988 		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3989 	}
3990 
3991 	/* disable each interrupt */
3992 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3993 		for (i = vsi->base_vector;
3994 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3995 			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3996 
3997 		i40e_flush(hw);
3998 		for (i = 0; i < vsi->num_q_vectors; i++)
3999 			synchronize_irq(pf->msix_entries[i + base].vector);
4000 	} else {
4001 		/* Legacy and MSI mode - this stops all interrupt handling */
4002 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
4003 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
4004 		i40e_flush(hw);
4005 		synchronize_irq(pf->pdev->irq);
4006 	}
4007 }
4008 
4009 /**
4010  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4011  * @vsi: the VSI being configured
4012  **/
4013 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4014 {
4015 	struct i40e_pf *pf = vsi->back;
4016 	int i;
4017 
4018 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4019 		for (i = 0; i < vsi->num_q_vectors; i++)
4020 			i40e_irq_dynamic_enable(vsi, i);
4021 	} else {
4022 		i40e_irq_dynamic_enable_icr0(pf);
4023 	}
4024 
4025 	i40e_flush(&pf->hw);
4026 	return 0;
4027 }
4028 
4029 /**
4030  * i40e_free_misc_vector - Free the vector that handles non-queue events
4031  * @pf: board private structure
4032  **/
4033 static void i40e_free_misc_vector(struct i40e_pf *pf)
4034 {
4035 	/* Disable ICR 0 */
4036 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4037 	i40e_flush(&pf->hw);
4038 
4039 	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4040 		synchronize_irq(pf->msix_entries[0].vector);
4041 		free_irq(pf->msix_entries[0].vector, pf);
4042 		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4043 	}
4044 }
4045 
4046 /**
4047  * i40e_intr - MSI/Legacy and non-queue interrupt handler
4048  * @irq: interrupt number
 * @data: pointer to the PF structure
4050  *
4051  * This is the handler used for all MSI/Legacy interrupts, and deals
4052  * with both queue and non-queue interrupts.  This is also used in
4053  * MSIX mode to handle the non-queue interrupts.
4054  **/
4055 static irqreturn_t i40e_intr(int irq, void *data)
4056 {
4057 	struct i40e_pf *pf = (struct i40e_pf *)data;
4058 	struct i40e_hw *hw = &pf->hw;
4059 	irqreturn_t ret = IRQ_NONE;
4060 	u32 icr0, icr0_remaining;
4061 	u32 val, ena_mask;
4062 
4063 	icr0 = rd32(hw, I40E_PFINT_ICR0);
4064 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4065 
4066 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
4067 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4068 		goto enable_intr;
4069 
4070 	/* if interrupt but no bits showing, must be SWINT */
4071 	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4072 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4073 		pf->sw_int_count++;
4074 
4075 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
4076 	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4077 		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4078 		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4079 		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4080 	}
4081 
4082 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4083 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4084 		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4085 		struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4086 
		/* We do not have a way to disarm queue causes while leaving
		 * the interrupt enabled for all other causes. Ideally the
		 * interrupt should be disabled while we are in NAPI, but
4090 		 * this is not a performance path and napi_schedule()
4091 		 * can deal with rescheduling.
4092 		 */
4093 		if (!test_bit(__I40E_DOWN, pf->state))
4094 			napi_schedule_irqoff(&q_vector->napi);
4095 	}
4096 
4097 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4098 		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4099 		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4100 		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4101 	}
4102 
4103 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4104 		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4105 		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4106 	}
4107 
4108 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4109 		/* disable any further VFLR event notifications */
4110 		if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4111 			u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4112 
4113 			reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4114 			wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4115 		} else {
4116 			ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4117 			set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4118 		}
4119 	}
4120 
4121 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4122 		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4123 			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4124 		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4125 		val = rd32(hw, I40E_GLGEN_RSTAT);
4126 		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4127 		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4128 		if (val == I40E_RESET_CORER) {
4129 			pf->corer_count++;
4130 		} else if (val == I40E_RESET_GLOBR) {
4131 			pf->globr_count++;
4132 		} else if (val == I40E_RESET_EMPR) {
4133 			pf->empr_count++;
4134 			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4135 		}
4136 	}
4137 
4138 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4139 		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4140 		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4141 		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4142 			 rd32(hw, I40E_PFHMC_ERRORINFO),
4143 			 rd32(hw, I40E_PFHMC_ERRORDATA));
4144 	}
4145 
4146 	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4147 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4148 
4149 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4150 			schedule_work(&pf->ptp_extts0_work);
4151 
4152 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4153 			i40e_ptp_tx_hwtstamp(pf);
4154 
4155 		icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4156 	}
4157 
4158 	/* If a critical error is pending we have no choice but to reset the
4159 	 * device.
4160 	 * Report and mask out any remaining unexpected interrupts.
4161 	 */
4162 	icr0_remaining = icr0 & ena_mask;
4163 	if (icr0_remaining) {
4164 		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4165 			 icr0_remaining);
4166 		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4167 		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4168 		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4169 			dev_info(&pf->pdev->dev, "device will be reset\n");
4170 			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4171 			i40e_service_event_schedule(pf);
4172 		}
4173 		ena_mask &= ~icr0_remaining;
4174 	}
4175 	ret = IRQ_HANDLED;
4176 
4177 enable_intr:
4178 	/* re-enable interrupt causes */
4179 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4180 	if (!test_bit(__I40E_DOWN, pf->state) ||
4181 	    test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4182 		i40e_service_event_schedule(pf);
4183 		i40e_irq_dynamic_enable_icr0(pf);
4184 	}
4185 
4186 	return ret;
4187 }
4188 
4189 /**
4190  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4191  * @tx_ring:  tx ring to clean
4192  * @budget:   how many cleans we're allowed
4193  *
 * Returns true if there's any budget left (i.e. the clean is finished)
4195  **/
4196 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4197 {
4198 	struct i40e_vsi *vsi = tx_ring->vsi;
4199 	u16 i = tx_ring->next_to_clean;
4200 	struct i40e_tx_buffer *tx_buf;
4201 	struct i40e_tx_desc *tx_desc;
4202 
4203 	tx_buf = &tx_ring->tx_bi[i];
4204 	tx_desc = I40E_TX_DESC(tx_ring, i);
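	/* bias the index by -count so that hitting zero flags a ring wrap */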
4205 	i -= tx_ring->count;
4206 
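	/* each Flow Director filter consumes two descriptors: the filter
	 * program descriptor and the data descriptor that follows it
	 */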
4207 	do {
4208 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4209 
4210 		/* if next_to_watch is not set then there is no work pending */
4211 		if (!eop_desc)
4212 			break;
4213 
4214 		/* prevent any other reads prior to eop_desc */
4215 		smp_rmb();
4216 
4217 		/* if the descriptor isn't done, no work yet to do */
4218 		if (!(eop_desc->cmd_type_offset_bsz &
4219 		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4220 			break;
4221 
4222 		/* clear next_to_watch to prevent false hangs */
4223 		tx_buf->next_to_watch = NULL;
4224 
4225 		tx_desc->buffer_addr = 0;
4226 		tx_desc->cmd_type_offset_bsz = 0;
4227 		/* move past filter desc */
4228 		tx_buf++;
4229 		tx_desc++;
4230 		i++;
4231 		if (unlikely(!i)) {
4232 			i -= tx_ring->count;
4233 			tx_buf = tx_ring->tx_bi;
4234 			tx_desc = I40E_TX_DESC(tx_ring, 0);
4235 		}
4236 		/* unmap skb header data */
4237 		dma_unmap_single(tx_ring->dev,
4238 				 dma_unmap_addr(tx_buf, dma),
4239 				 dma_unmap_len(tx_buf, len),
4240 				 DMA_TO_DEVICE);
4241 		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4242 			kfree(tx_buf->raw_buf);
4243 
4244 		tx_buf->raw_buf = NULL;
4245 		tx_buf->tx_flags = 0;
4246 		tx_buf->next_to_watch = NULL;
4247 		dma_unmap_len_set(tx_buf, len, 0);
4248 		tx_desc->buffer_addr = 0;
4249 		tx_desc->cmd_type_offset_bsz = 0;
4250 
4251 		/* move us past the eop_desc for start of next FD desc */
4252 		tx_buf++;
4253 		tx_desc++;
4254 		i++;
4255 		if (unlikely(!i)) {
4256 			i -= tx_ring->count;
4257 			tx_buf = tx_ring->tx_bi;
4258 			tx_desc = I40E_TX_DESC(tx_ring, 0);
4259 		}
4260 
4261 		/* update budget accounting */
4262 		budget--;
4263 	} while (likely(budget));
4264 
4265 	i += tx_ring->count;
4266 	tx_ring->next_to_clean = i;
4267 
4268 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4269 		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4270 
4271 	return budget > 0;
4272 }
4273 
4274 /**
4275  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4276  * @irq: interrupt number
4277  * @data: pointer to a q_vector
4278  **/
4279 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4280 {
4281 	struct i40e_q_vector *q_vector = data;
4282 	struct i40e_vsi *vsi;
4283 
4284 	if (!q_vector->tx.ring)
4285 		return IRQ_HANDLED;
4286 
4287 	vsi = q_vector->tx.ring->vsi;
4288 	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4289 
4290 	return IRQ_HANDLED;
4291 }
4292 
4293 /**
4294  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4295  * @vsi: the VSI being configured
4296  * @v_idx: vector index
4297  * @qp_idx: queue pair index
4298  **/
4299 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4300 {
4301 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4302 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4303 	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4304 
4305 	tx_ring->q_vector = q_vector;
4306 	tx_ring->next = q_vector->tx.ring;
4307 	q_vector->tx.ring = tx_ring;
4308 	q_vector->tx.count++;
4309 
4310 	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4311 	if (i40e_enabled_xdp_vsi(vsi)) {
4312 		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4313 
4314 		xdp_ring->q_vector = q_vector;
4315 		xdp_ring->next = q_vector->tx.ring;
4316 		q_vector->tx.ring = xdp_ring;
4317 		q_vector->tx.count++;
4318 	}
4319 
4320 	rx_ring->q_vector = q_vector;
4321 	rx_ring->next = q_vector->rx.ring;
4322 	q_vector->rx.ring = rx_ring;
4323 	q_vector->rx.count++;
4324 }
4325 
4326 /**
4327  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4328  * @vsi: the VSI being configured
4329  *
4330  * This function maps descriptor rings to the queue-specific vectors
4331  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
4332  * one vector per queue pair, but on a constrained vector budget, we
4333  * group the queue pairs as "efficiently" as possible.
4334  **/
4335 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4336 {
4337 	int qp_remaining = vsi->num_queue_pairs;
4338 	int q_vectors = vsi->num_q_vectors;
4339 	int num_ringpairs;
4340 	int v_start = 0;
4341 	int qp_idx = 0;
4342 
4343 	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4344 	 * group them so there are multiple queues per vector.
 * It is also important to go through all the available vectors so that,
 * if we don't use them all, the remaining vectors are cleared. This is
 * especially important when decreasing the
4348 	 * number of queues in use.
4349 	 */
4350 	for (; v_start < q_vectors; v_start++) {
4351 		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4352 
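		/* spread the remaining queue pairs evenly over the
		 * vectors that are left
		 */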
4353 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4354 
4355 		q_vector->num_ringpairs = num_ringpairs;
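		/* register indices for queue vectors start one below the
		 * MSI-X vector number (vector 0 is the misc vector)
		 */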
4356 		q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4357 
4358 		q_vector->rx.count = 0;
4359 		q_vector->tx.count = 0;
4360 		q_vector->rx.ring = NULL;
4361 		q_vector->tx.ring = NULL;
4362 
4363 		while (num_ringpairs--) {
4364 			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4365 			qp_idx++;
4366 			qp_remaining--;
4367 		}
4368 	}
4369 }
4370 
4371 /**
4372  * i40e_vsi_request_irq - Request IRQ from the OS
4373  * @vsi: the VSI being configured
4374  * @basename: name for the vector
4375  **/
4376 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4377 {
4378 	struct i40e_pf *pf = vsi->back;
4379 	int err;
4380 
4381 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4382 		err = i40e_vsi_request_irq_msix(vsi, basename);
4383 	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4384 		err = request_irq(pf->pdev->irq, i40e_intr, 0,
4385 				  pf->int_name, pf);
4386 	else
4387 		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4388 				  pf->int_name, pf);
4389 
4390 	if (err)
4391 		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4392 
4393 	return err;
4394 }
4395 
4396 #ifdef CONFIG_NET_POLL_CONTROLLER
4397 /**
4398  * i40e_netpoll - A Polling 'interrupt' handler
4399  * @netdev: network interface device structure
4400  *
4401  * This is used by netconsole to send skbs without having to re-enable
4402  * interrupts.  It's not called while the normal interrupt routine is executing.
4403  **/
4404 static void i40e_netpoll(struct net_device *netdev)
4405 {
4406 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4407 	struct i40e_vsi *vsi = np->vsi;
4408 	struct i40e_pf *pf = vsi->back;
4409 	int i;
4410 
4411 	/* if interface is down do nothing */
4412 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
4413 		return;
4414 
4415 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4416 		for (i = 0; i < vsi->num_q_vectors; i++)
4417 			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4418 	} else {
		i40e_intr(pf->pdev->irq, pf);
4420 	}
4421 }
4422 #endif
4423 
4424 #define I40E_QTX_ENA_WAIT_COUNT 50
4425 
4426 /**
4427  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4428  * @pf: the PF being configured
4429  * @pf_q: the PF queue
4430  * @enable: enable or disable state of the queue
4431  *
4432  * This routine will wait for the given Tx queue of the PF to reach the
4433  * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue does not reach the requested state after
 * multiple retries, or 0 on success.
4436  **/
4437 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4438 {
4439 	int i;
4440 	u32 tx_reg;
4441 
4442 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4443 		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4444 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4445 			break;
4446 
4447 		usleep_range(10, 20);
4448 	}
4449 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4450 		return -ETIMEDOUT;
4451 
4452 	return 0;
4453 }
4454 
4455 /**
4456  * i40e_control_tx_q - Start or stop a particular Tx queue
4457  * @pf: the PF structure
4458  * @pf_q: the PF queue to configure
4459  * @enable: start or stop the queue
4460  *
4461  * This function enables or disables a single queue. Note that any delay
4462  * required after the operation is expected to be handled by the caller of
4463  * this function.
4464  **/
4465 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4466 {
4467 	struct i40e_hw *hw = &pf->hw;
4468 	u32 tx_reg;
4469 	int i;
4470 
4471 	/* warn the TX unit of coming changes */
4472 	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4473 	if (!enable)
4474 		usleep_range(10, 20);
4475 
4476 	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4477 		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4478 		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4479 		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4480 			break;
4481 		usleep_range(1000, 2000);
4482 	}
4483 
4484 	/* Skip if the queue is already in the requested state */
4485 	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4486 		return;
4487 
4488 	/* turn on/off the queue */
4489 	if (enable) {
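		/* reset the queue's head pointer before requesting enable */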
4490 		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4491 		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4492 	} else {
4493 		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4494 	}
4495 
4496 	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4497 }
4498 
4499 /**
4500  * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4501  * @seid: VSI SEID
4502  * @pf: the PF structure
4503  * @pf_q: the PF queue to configure
4504  * @is_xdp: true if the queue is used for XDP
4505  * @enable: start or stop the queue
4506  **/
4507 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4508 			   bool is_xdp, bool enable)
4509 {
4510 	int ret;
4511 
4512 	i40e_control_tx_q(pf, pf_q, enable);
4513 
4514 	/* wait for the change to finish */
4515 	ret = i40e_pf_txq_wait(pf, pf_q, enable);
4516 	if (ret) {
4517 		dev_info(&pf->pdev->dev,
4518 			 "VSI seid %d %sTx ring %d %sable timeout\n",
4519 			 seid, (is_xdp ? "XDP " : ""), pf_q,
4520 			 (enable ? "en" : "dis"));
4521 	}
4522 
4523 	return ret;
4524 }
4525 
4526 /**
4527  * i40e_vsi_enable_tx - Start a VSI's rings
4528  * @vsi: the VSI being configured
4529  **/
4530 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4531 {
4532 	struct i40e_pf *pf = vsi->back;
4533 	int i, pf_q, ret = 0;
4534 
4535 	pf_q = vsi->base_queue;
4536 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4537 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
4538 					     pf_q,
4539 					     false /*is xdp*/, true);
4540 		if (ret)
4541 			break;
4542 
4543 		if (!i40e_enabled_xdp_vsi(vsi))
4544 			continue;
4545 
4546 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
4547 					     pf_q + vsi->alloc_queue_pairs,
4548 					     true /*is xdp*/, true);
4549 		if (ret)
4550 			break;
4551 	}
4552 	return ret;
4553 }
4554 
4555 /**
4556  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4557  * @pf: the PF being configured
4558  * @pf_q: the PF queue
4559  * @enable: enable or disable state of the queue
4560  *
4561  * This routine will wait for the given Rx queue of the PF to reach the
4562  * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue does not reach the requested state after
 * multiple retries, or 0 on success.
4565  **/
4566 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4567 {
4568 	int i;
4569 	u32 rx_reg;
4570 
4571 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4572 		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4573 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4574 			break;
4575 
4576 		usleep_range(10, 20);
4577 	}
4578 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4579 		return -ETIMEDOUT;
4580 
4581 	return 0;
4582 }
4583 
4584 /**
4585  * i40e_control_rx_q - Start or stop a particular Rx queue
4586  * @pf: the PF structure
4587  * @pf_q: the PF queue to configure
4588  * @enable: start or stop the queue
4589  *
4590  * This function enables or disables a single queue. Note that
4591  * any delay required after the operation is expected to be
4592  * handled by the caller of this function.
4593  **/
4594 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4595 {
4596 	struct i40e_hw *hw = &pf->hw;
4597 	u32 rx_reg;
4598 	int i;
4599 
4600 	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4601 		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4602 		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4603 		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4604 			break;
4605 		usleep_range(1000, 2000);
4606 	}
4607 
4608 	/* Skip if the queue is already in the requested state */
4609 	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4610 		return;
4611 
4612 	/* turn on/off the queue */
4613 	if (enable)
4614 		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4615 	else
4616 		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4617 
4618 	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4619 }
4620 
4621 /**
 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4623  * @pf: the PF structure
4624  * @pf_q: queue being configured
4625  * @enable: start or stop the rings
4626  *
4627  * This function enables or disables a single queue along with waiting
4628  * for the change to finish. The caller of this function should handle
4629  * the delays needed in the case of disabling queues.
4630  **/
4631 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4632 {
	i40e_control_rx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	return i40e_pf_rxq_wait(pf, pf_q, enable);
4643 }
4644 
4645 /**
4646  * i40e_vsi_enable_rx - Start a VSI's rings
4647  * @vsi: the VSI being configured
4648  **/
4649 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4650 {
4651 	struct i40e_pf *pf = vsi->back;
4652 	int i, pf_q, ret = 0;
4653 
4654 	pf_q = vsi->base_queue;
4655 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4656 		ret = i40e_control_wait_rx_q(pf, pf_q, true);
4657 		if (ret) {
4658 			dev_info(&pf->pdev->dev,
4659 				 "VSI seid %d Rx ring %d enable timeout\n",
4660 				 vsi->seid, pf_q);
4661 			break;
4662 		}
4663 	}
4664 
4665 	return ret;
4666 }
4667 
4668 /**
4669  * i40e_vsi_start_rings - Start a VSI's rings
4670  * @vsi: the VSI being configured
4671  **/
4672 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4673 {
4674 	int ret = 0;
4675 
4676 	/* do rx first for enable and last for disable */
4677 	ret = i40e_vsi_enable_rx(vsi);
4678 	if (ret)
4679 		return ret;
4680 	ret = i40e_vsi_enable_tx(vsi);
4681 
4682 	return ret;
4683 }
4684 
4685 #define I40E_DISABLE_TX_GAP_MSEC	50
4686 
4687 /**
4688  * i40e_vsi_stop_rings - Stop a VSI's rings
4689  * @vsi: the VSI being configured
4690  **/
4691 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4692 {
4693 	struct i40e_pf *pf = vsi->back;
4694 	int pf_q, err, q_end;
4695 
4696 	/* When port TX is suspended, don't wait */
4697 	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4698 		return i40e_vsi_stop_rings_no_wait(vsi);
4699 
4700 	q_end = vsi->base_queue + vsi->num_queue_pairs;
4701 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4702 		i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4703 
4704 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4705 		err = i40e_control_wait_rx_q(pf, pf_q, false);
4706 		if (err)
4707 			dev_info(&pf->pdev->dev,
4708 				 "VSI seid %d Rx ring %d disable timeout\n",
4709 				 vsi->seid, pf_q);
4710 	}
4711 
4712 	msleep(I40E_DISABLE_TX_GAP_MSEC);
4714 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4715 		wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4716 
4717 	i40e_vsi_wait_queues_disabled(vsi);
4718 }
4719 
4720 /**
4721  * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4722  * @vsi: the VSI being shutdown
4723  *
4724  * This function stops all the rings for a VSI but does not delay to verify
4725  * that rings have been disabled. It is expected that the caller is shutting
4726  * down multiple VSIs at once and will delay together for all the VSIs after
4727  * initiating the shutdown. This is particularly useful for shutting down lots
4728  * of VFs together. Otherwise, a large delay can be incurred while configuring
 * each VSI serially.
4730  **/
4731 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4732 {
4733 	struct i40e_pf *pf = vsi->back;
4734 	int i, pf_q;
4735 
4736 	pf_q = vsi->base_queue;
4737 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4738 		i40e_control_tx_q(pf, pf_q, false);
4739 		i40e_control_rx_q(pf, pf_q, false);
4740 	}
4741 }
4742 
4743 /**
4744  * i40e_vsi_free_irq - Free the irq association with the OS
4745  * @vsi: the VSI being configured
4746  **/
4747 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4748 {
4749 	struct i40e_pf *pf = vsi->back;
4750 	struct i40e_hw *hw = &pf->hw;
4751 	int base = vsi->base_vector;
4752 	u32 val, qp;
4753 	int i;
4754 
4755 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4756 		if (!vsi->q_vectors)
4757 			return;
4758 
4759 		if (!vsi->irqs_ready)
4760 			return;
4761 
4762 		vsi->irqs_ready = false;
4763 		for (i = 0; i < vsi->num_q_vectors; i++) {
4764 			int irq_num;
4765 			u16 vector;
4766 
4767 			vector = i + base;
4768 			irq_num = pf->msix_entries[vector].vector;
4769 
4770 			/* free only the irqs that were actually requested */
4771 			if (!vsi->q_vectors[i] ||
4772 			    !vsi->q_vectors[i]->num_ringpairs)
4773 				continue;
4774 
4775 			/* clear the affinity notifier in the IRQ descriptor */
4776 			irq_set_affinity_notifier(irq_num, NULL);
4777 			/* remove our suggested affinity mask for this IRQ */
4778 			irq_update_affinity_hint(irq_num, NULL);
4779 			synchronize_irq(irq_num);
4780 			free_irq(irq_num, vsi->q_vectors[i]);
4781 
4782 			/* Tear down the interrupt queue link list
4783 			 *
4784 			 * We know that they come in pairs and always
4785 			 * the Rx first, then the Tx.  To clear the
4786 			 * link list, stick the EOL value into the
4787 			 * next_q field of the registers.
4788 			 */
4789 			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4790 			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4791 				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4792 			val |= I40E_QUEUE_END_OF_LIST
4793 				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4794 			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4795 
4796 			while (qp != I40E_QUEUE_END_OF_LIST) {
4797 				u32 next;
4798 
4799 				val = rd32(hw, I40E_QINT_RQCTL(qp));
4800 
4801 				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4802 					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4803 					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4804 					 I40E_QINT_RQCTL_INTEVENT_MASK);
4805 
4806 				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4807 					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4808 
4809 				wr32(hw, I40E_QINT_RQCTL(qp), val);
4810 
4811 				val = rd32(hw, I40E_QINT_TQCTL(qp));
4812 
4813 				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4814 					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4815 
4816 				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4817 					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4818 					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4819 					 I40E_QINT_TQCTL_INTEVENT_MASK);
4820 
4821 				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4822 					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4823 
4824 				wr32(hw, I40E_QINT_TQCTL(qp), val);
4825 				qp = next;
4826 			}
4827 		}
4828 	} else {
4829 		free_irq(pf->pdev->irq, pf);
4830 
4831 		val = rd32(hw, I40E_PFINT_LNKLST0);
4832 		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4833 			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4834 		val |= I40E_QUEUE_END_OF_LIST
4835 			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4836 		wr32(hw, I40E_PFINT_LNKLST0, val);
4837 
4838 		val = rd32(hw, I40E_QINT_RQCTL(qp));
4839 		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4840 			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4841 			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4842 			 I40E_QINT_RQCTL_INTEVENT_MASK);
4843 
4844 		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4845 			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4846 
4847 		wr32(hw, I40E_QINT_RQCTL(qp), val);
4848 
4849 		val = rd32(hw, I40E_QINT_TQCTL(qp));
4850 
4851 		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4852 			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4853 			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4854 			 I40E_QINT_TQCTL_INTEVENT_MASK);
4855 
4856 		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4857 			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4858 
4859 		wr32(hw, I40E_QINT_TQCTL(qp), val);
4860 	}
4861 }
4862 
4863 /**
4864  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4865  * @vsi: the VSI being configured
4866  * @v_idx: Index of vector to be freed
4867  *
4868  * This function frees the memory allocated to the q_vector.  In addition if
4869  * NAPI is enabled it will delete any references to the NAPI struct prior
4870  * to freeing the q_vector.
4871  **/
4872 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4873 {
4874 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4875 	struct i40e_ring *ring;
4876 
4877 	if (!q_vector)
4878 		return;
4879 
4880 	/* disassociate q_vector from rings */
4881 	i40e_for_each_ring(ring, q_vector->tx)
4882 		ring->q_vector = NULL;
4883 
4884 	i40e_for_each_ring(ring, q_vector->rx)
4885 		ring->q_vector = NULL;
4886 
4887 	/* only VSI w/ an associated netdev is set up w/ NAPI */
4888 	if (vsi->netdev)
4889 		netif_napi_del(&q_vector->napi);
4890 
4891 	vsi->q_vectors[v_idx] = NULL;
4892 
4893 	kfree_rcu(q_vector, rcu);
4894 }
4895 
4896 /**
4897  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4898  * @vsi: the VSI being un-configured
4899  *
4900  * This frees the memory allocated to the q_vectors and
4901  * deletes references to the NAPI struct.
4902  **/
4903 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4904 {
4905 	int v_idx;
4906 
4907 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4908 		i40e_free_q_vector(vsi, v_idx);
4909 }
4910 
4911 /**
4912  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4913  * @pf: board private structure
4914  **/
4915 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4916 {
4917 	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4918 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4919 		pci_disable_msix(pf->pdev);
4920 		kfree(pf->msix_entries);
4921 		pf->msix_entries = NULL;
4922 		kfree(pf->irq_pile);
4923 		pf->irq_pile = NULL;
4924 	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4925 		pci_disable_msi(pf->pdev);
4926 	}
4927 	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4928 }
4929 
4930 /**
4931  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4932  * @pf: board private structure
4933  *
 * We go through and clear interrupt-specific resources and reset the
 * structure to pre-load conditions.
4936  **/
4937 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4938 {
4939 	int i;
4940 
4941 	if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
4942 		i40e_free_misc_vector(pf);
4943 
4944 	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4945 		      I40E_IWARP_IRQ_PILE_ID);
4946 
4947 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4948 	for (i = 0; i < pf->num_alloc_vsi; i++)
4949 		if (pf->vsi[i])
4950 			i40e_vsi_free_q_vectors(pf->vsi[i]);
4951 	i40e_reset_interrupt_capability(pf);
4952 }
4953 
4954 /**
4955  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4956  * @vsi: the VSI being configured
4957  **/
4958 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4959 {
4960 	int q_idx;
4961 
4962 	if (!vsi->netdev)
4963 		return;
4964 
4965 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4966 		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4967 
4968 		if (q_vector->rx.ring || q_vector->tx.ring)
4969 			napi_enable(&q_vector->napi);
4970 	}
4971 }
4972 
4973 /**
4974  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4975  * @vsi: the VSI being configured
4976  **/
4977 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4978 {
4979 	int q_idx;
4980 
4981 	if (!vsi->netdev)
4982 		return;
4983 
4984 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4985 		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4986 
4987 		if (q_vector->rx.ring || q_vector->tx.ring)
4988 			napi_disable(&q_vector->napi);
4989 	}
4990 }
4991 
4992 /**
4993  * i40e_vsi_close - Shut down a VSI
4994  * @vsi: the vsi to be quelled
4995  **/
4996 static void i40e_vsi_close(struct i40e_vsi *vsi)
4997 {
	struct i40e_pf *pf = vsi->back;

	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
5000 		i40e_down(vsi);
5001 	i40e_vsi_free_irq(vsi);
5002 	i40e_vsi_free_tx_resources(vsi);
5003 	i40e_vsi_free_rx_resources(vsi);
5004 	vsi->current_netdev_flags = 0;
5005 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5006 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5007 		set_bit(__I40E_CLIENT_RESET, pf->state);
5008 }
5009 
5010 /**
5011  * i40e_quiesce_vsi - Pause a given VSI
5012  * @vsi: the VSI being paused
5013  **/
5014 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5015 {
5016 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
5017 		return;
5018 
5019 	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
5020 	if (vsi->netdev && netif_running(vsi->netdev))
5021 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5022 	else
5023 		i40e_vsi_close(vsi);
5024 }
5025 
5026 /**
5027  * i40e_unquiesce_vsi - Resume a given VSI
5028  * @vsi: the VSI being resumed
5029  **/
5030 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5031 {
5032 	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5033 		return;
5034 
5035 	if (vsi->netdev && netif_running(vsi->netdev))
5036 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5037 	else
5038 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
5039 }
5040 
5041 /**
5042  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5043  * @pf: the PF
5044  **/
5045 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5046 {
5047 	int v;
5048 
5049 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5050 		if (pf->vsi[v])
5051 			i40e_quiesce_vsi(pf->vsi[v]);
5052 	}
5053 }
5054 
5055 /**
5056  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5057  * @pf: the PF
5058  **/
5059 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5060 {
5061 	int v;
5062 
5063 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5064 		if (pf->vsi[v])
5065 			i40e_unquiesce_vsi(pf->vsi[v]);
5066 	}
5067 }
5068 
5069 /**
5070  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5071  * @vsi: the VSI being configured
5072  *
5073  * Wait until all queues on a given VSI have been disabled.
5074  **/
5075 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5076 {
5077 	struct i40e_pf *pf = vsi->back;
5078 	int i, pf_q, ret;
5079 
5080 	pf_q = vsi->base_queue;
5081 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5082 		/* Check and wait for the Tx queue */
5083 		ret = i40e_pf_txq_wait(pf, pf_q, false);
5084 		if (ret) {
5085 			dev_info(&pf->pdev->dev,
5086 				 "VSI seid %d Tx ring %d disable timeout\n",
5087 				 vsi->seid, pf_q);
5088 			return ret;
5089 		}
5090 
5091 		if (!i40e_enabled_xdp_vsi(vsi))
5092 			goto wait_rx;
5093 
5094 		/* Check and wait for the XDP Tx queue */
5095 		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5096 				       false);
5097 		if (ret) {
5098 			dev_info(&pf->pdev->dev,
5099 				 "VSI seid %d XDP Tx ring %d disable timeout\n",
5100 				 vsi->seid, pf_q);
5101 			return ret;
5102 		}
5103 wait_rx:
5104 		/* Check and wait for the Rx queue */
5105 		ret = i40e_pf_rxq_wait(pf, pf_q, false);
5106 		if (ret) {
5107 			dev_info(&pf->pdev->dev,
5108 				 "VSI seid %d Rx ring %d disable timeout\n",
5109 				 vsi->seid, pf_q);
5110 			return ret;
5111 		}
5112 	}
5113 
5114 	return 0;
5115 }
5116 
5117 #ifdef CONFIG_I40E_DCB
5118 /**
5119  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5120  * @pf: the PF
5121  *
5122  * This function waits for the queues to be in disabled state for all the
5123  * VSIs that are managed by this PF.
5124  **/
5125 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5126 {
5127 	int v, ret = 0;
5128 
	for (v = 0; v < pf->num_alloc_vsi; v++) {
5130 		if (pf->vsi[v]) {
5131 			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5132 			if (ret)
5133 				break;
5134 		}
5135 	}
5136 
5137 	return ret;
5138 }
5139 
5140 #endif
5141 
5142 /**
5143  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5144  * @pf: pointer to PF
5145  *
 * Get TC map for iSCSI PF type that will include iSCSI TC
5147  * and LAN TC.
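 * (e.g. if the iSCSI APP priority maps to TC1, the returned map is 0x3)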
5148  **/
5149 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5150 {
5151 	struct i40e_dcb_app_priority_table app;
5152 	struct i40e_hw *hw = &pf->hw;
5153 	u8 enabled_tc = 1; /* TC0 is always enabled */
5154 	u8 tc, i;
5155 	/* Get the iSCSI APP TLV */
5156 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5157 
5158 	for (i = 0; i < dcbcfg->numapps; i++) {
5159 		app = dcbcfg->app[i];
5160 		if (app.selector == I40E_APP_SEL_TCPIP &&
5161 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
5162 			tc = dcbcfg->etscfg.prioritytable[app.priority];
5163 			enabled_tc |= BIT(tc);
5164 			break;
5165 		}
5166 	}
5167 
5168 	return enabled_tc;
5169 }
5170 
5171 /**
5172  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
5173  * @dcbcfg: the corresponding DCBx configuration structure
5174  *
5175  * Return the number of TCs from given DCBx configuration
5176  **/
5177 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5178 {
5179 	int i, tc_unused = 0;
5180 	u8 num_tc = 0;
5181 	u8 ret = 0;
5182 
5183 	/* Scan the ETS Config Priority Table to find
5184 	 * traffic class enabled for a given priority
5185 	 * and create a bitmask of enabled TCs
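	 * (e.g. if all priorities map to TCs 0-2, the bitmask ends up as 0x7)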
5186 	 */
5187 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5188 		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5189 
5190 	/* Now scan the bitmask to check for
5191 	 * contiguous TCs starting with TC0
5192 	 */
5193 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5194 		if (num_tc & BIT(i)) {
5195 			if (!tc_unused) {
5196 				ret++;
5197 			} else {
5198 				pr_err("Non-contiguous TC - Disabling DCB\n");
5199 				return 1;
5200 			}
5201 		} else {
5202 			tc_unused = 1;
5203 		}
5204 	}
5205 
5206 	/* There is always at least TC0 */
5207 	if (!ret)
5208 		ret = 1;
5209 
5210 	return ret;
5211 }
5212 
5213 /**
5214  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5215  * @dcbcfg: the corresponding DCBx configuration structure
5216  *
 * Query the current DCB configuration and return a bitmap of the
 * traffic classes enabled from the given DCBX config
5219  **/
5220 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5221 {
5222 	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5223 	u8 enabled_tc = 1;
5224 	u8 i;
5225 
5226 	for (i = 0; i < num_tc; i++)
5227 		enabled_tc |= BIT(i);
5228 
5229 	return enabled_tc;
5230 }
5231 
5232 /**
5233  * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5234  * @pf: PF being queried
5235  *
 * Query the current MQPRIO configuration and return a bitmap of the
 * traffic classes enabled.
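 * (e.g. num_tc = 3 yields the bitmap 0x7, i.e. TC0-TC2)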
5238  **/
5239 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5240 {
5241 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5242 	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5243 	u8 enabled_tc = 1, i;
5244 
5245 	for (i = 1; i < num_tc; i++)
5246 		enabled_tc |= BIT(i);
5247 	return enabled_tc;
5248 }
5249 
5250 /**
 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
5252  * @pf: PF being queried
5253  *
5254  * Return number of traffic classes enabled for the given PF
5255  **/
5256 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5257 {
5258 	struct i40e_hw *hw = &pf->hw;
5259 	u8 i, enabled_tc = 1;
5260 	u8 num_tc = 0;
5261 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5262 
5263 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5264 		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5265 
5266 	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
5267 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5268 		return 1;
5269 
	/* In SFP mode the PF is enabled for all TCs on the port */
5271 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5272 		return i40e_dcb_get_num_tc(dcbcfg);
5273 
	/* In MFP mode, return the count of TCs enabled for this PF */
5275 	if (pf->hw.func_caps.iscsi)
5276 		enabled_tc =  i40e_get_iscsi_tc_map(pf);
5277 	else
5278 		return 1; /* Only TC0 */
5279 
5280 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5281 		if (enabled_tc & BIT(i))
5282 			num_tc++;
5283 	}
5284 	return num_tc;
5285 }
5286 
5287 /**
5288  * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5289  * @pf: PF being queried
5290  *
5291  * Return a bitmap for enabled traffic classes for this PF.
5292  **/
5293 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5294 {
5295 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5296 		return i40e_mqprio_get_enabled_tc(pf);
5297 
5298 	/* If neither MQPRIO nor DCB is enabled for this PF then just return
5299 	 * default TC
5300 	 */
5301 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5302 		return I40E_DEFAULT_TRAFFIC_CLASS;
5303 
	/* In SFP mode we want the PF to be enabled for all TCs */
5305 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5306 		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5307 
5308 	/* MFP enabled and iSCSI PF type */
5309 	if (pf->hw.func_caps.iscsi)
5310 		return i40e_get_iscsi_tc_map(pf);
5311 	else
5312 		return I40E_DEFAULT_TRAFFIC_CLASS;
5313 }
5314 
5315 /**
5316  * i40e_vsi_get_bw_info - Query VSI BW Information
5317  * @vsi: the VSI being queried
5318  *
5319  * Returns 0 on success, negative value on failure
5320  **/
5321 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5322 {
5323 	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5324 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5325 	struct i40e_pf *pf = vsi->back;
5326 	struct i40e_hw *hw = &pf->hw;
5327 	i40e_status ret;
5328 	u32 tc_bw_max;
5329 	int i;
5330 
5331 	/* Get the VSI level BW configuration */
5332 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5333 	if (ret) {
5334 		dev_info(&pf->pdev->dev,
5335 			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5336 			 i40e_stat_str(&pf->hw, ret),
5337 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5338 		return -EINVAL;
5339 	}
5340 
5341 	/* Get the VSI level BW configuration per TC */
5342 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5343 					       NULL);
5344 	if (ret) {
5345 		dev_info(&pf->pdev->dev,
5346 			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5347 			 i40e_stat_str(&pf->hw, ret),
5348 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5349 		return -EINVAL;
5350 	}
5351 
5352 	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5353 		dev_info(&pf->pdev->dev,
5354 			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5355 			 bw_config.tc_valid_bits,
5356 			 bw_ets_config.tc_valid_bits);
5357 		/* Still continuing */
5358 	}
5359 
5360 	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5361 	vsi->bw_max_quanta = bw_config.max_bw;
5362 	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5363 		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5364 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5365 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5366 		vsi->bw_ets_limit_credits[i] =
5367 					le16_to_cpu(bw_ets_config.credits[i]);
5368 		/* 3 bits out of 4 for each TC */
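		/* e.g. TC1's quanta sit in bits 7:4 of tc_bw_max; bits 6:4 are kept */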
5369 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5370 	}
5371 
5372 	return 0;
5373 }
5374 
5375 /**
5376  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5377  * @vsi: the VSI being configured
5378  * @enabled_tc: TC bitmap
5379  * @bw_share: BW shared credits per TC
5380  *
5381  * Returns 0 on success, negative value on failure
5382  **/
5383 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5384 				       u8 *bw_share)
5385 {
5386 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5387 	struct i40e_pf *pf = vsi->back;
5388 	i40e_status ret;
5389 	int i;
5390 
5391 	/* There is no need to reset BW when mqprio mode is on.  */
5392 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5393 		return 0;
5394 	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5395 		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5396 		if (ret)
5397 			dev_info(&pf->pdev->dev,
5398 				 "Failed to reset tx rate for vsi->seid %u\n",
5399 				 vsi->seid);
5400 		return ret;
5401 	}
5402 	memset(&bw_data, 0, sizeof(bw_data));
5403 	bw_data.tc_valid_bits = enabled_tc;
5404 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5405 		bw_data.tc_bw_credits[i] = bw_share[i];
5406 
5407 	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5408 	if (ret) {
5409 		dev_info(&pf->pdev->dev,
5410 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
5411 			 pf->hw.aq.asq_last_status);
5412 		return -EINVAL;
5413 	}
5414 
5415 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5416 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5417 
5418 	return 0;
5419 }
5420 
5421 /**
5422  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5423  * @vsi: the VSI being configured
5424  * @enabled_tc: TC map to be enabled
5425  *
5426  **/
5427 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5428 {
5429 	struct net_device *netdev = vsi->netdev;
5430 	struct i40e_pf *pf = vsi->back;
5431 	struct i40e_hw *hw = &pf->hw;
5432 	u8 netdev_tc = 0;
5433 	int i;
5434 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5435 
5436 	if (!netdev)
5437 		return;
5438 
5439 	if (!enabled_tc) {
5440 		netdev_reset_tc(netdev);
5441 		return;
5442 	}
5443 
5444 	/* Set up actual enabled TCs on the VSI */
5445 	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5446 		return;
5447 
5448 	/* set per TC queues for the VSI */
5449 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5450 		/* Only set TC queues for enabled tcs
5451 		 *
5452 		 * e.g. For a VSI that has TC0 and TC3 enabled the
5453 		 * enabled_tc bitmap would be 0x00001001; the driver
5454 		 * will set the numtc for netdev as 2 that will be
5455 		 * referenced by the netdev layer as TC 0 and 1.
5456 		 */
5457 		if (vsi->tc_config.enabled_tc & BIT(i))
5458 			netdev_set_tc_queue(netdev,
5459 					vsi->tc_config.tc_info[i].netdev_tc,
5460 					vsi->tc_config.tc_info[i].qcount,
5461 					vsi->tc_config.tc_info[i].qoffset);
5462 	}
5463 
5464 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5465 		return;
5466 
5467 	/* Assign UP2TC map for the VSI */
5468 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5469 		/* Get the actual TC# for the UP */
5470 		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5471 		/* Get the mapped netdev TC# for the UP */
5472 		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
5473 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
5474 	}
5475 }
5476 
5477 /**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5479  * @vsi: the VSI being configured
5480  * @ctxt: the ctxt buffer returned from AQ VSI update param command
5481  **/
5482 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5483 				      struct i40e_vsi_context *ctxt)
5484 {
5485 	/* copy just the sections touched not the entire info
5486 	 * since not all sections are valid as returned by
5487 	 * update vsi params
5488 	 */
5489 	vsi->info.mapping_flags = ctxt->info.mapping_flags;
5490 	memcpy(&vsi->info.queue_mapping,
5491 	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5492 	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5493 	       sizeof(vsi->info.tc_mapping));
5494 }
5495 
5496 /**
5497  * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5498  * @vsi: the VSI being reconfigured
5499  * @vsi_offset: offset from main VF VSI
5500  */
5501 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5502 {
5503 	struct i40e_vsi_context ctxt = {};
5504 	struct i40e_pf *pf;
5505 	struct i40e_hw *hw;
5506 	int ret;
5507 
5508 	if (!vsi)
5509 		return I40E_ERR_PARAM;
5510 	pf = vsi->back;
5511 	hw = &pf->hw;
5512 
5513 	ctxt.seid = vsi->seid;
5514 	ctxt.pf_num = hw->pf_id;
5515 	ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5516 	ctxt.uplink_seid = vsi->uplink_seid;
5517 	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5518 	ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5519 	ctxt.info = vsi->info;
5520 
5521 	i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5522 				 false);
5523 	if (vsi->reconfig_rss) {
5524 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
5525 				      vsi->num_queue_pairs);
5526 		ret = i40e_vsi_config_rss(vsi);
5527 		if (ret) {
5528 			dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5529 			return ret;
5530 		}
5531 		vsi->reconfig_rss = false;
5532 	}
5533 
5534 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5535 	if (ret) {
5536 		dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
5537 			 i40e_stat_str(hw, ret),
5538 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5539 		return ret;
5540 	}
5541 	/* update the local VSI info with updated queue map */
5542 	i40e_vsi_update_queue_map(vsi, &ctxt);
5543 	vsi->info.valid_sections = 0;
5544 
5545 	return ret;
5546 }
5547 
5548 /**
5549  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5550  * @vsi: VSI to be configured
5551  * @enabled_tc: TC bitmap
5552  *
5553  * This configures a particular VSI for TCs that are mapped to the
5554  * given TC bitmap. It uses default bandwidth share for TCs across
5555  * VSIs to configure TC for a particular VSI.
5556  *
5557  * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
5559  * this function.
5560  **/
5561 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5562 {
5563 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5564 	struct i40e_pf *pf = vsi->back;
5565 	struct i40e_hw *hw = &pf->hw;
5566 	struct i40e_vsi_context ctxt;
5567 	int ret = 0;
5568 	int i;
5569 
5570 	/* Check if enabled_tc is same as existing or new TCs */
5571 	if (vsi->tc_config.enabled_tc == enabled_tc &&
5572 	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5573 		return ret;
5574 
5575 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
5576 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5577 		if (enabled_tc & BIT(i))
5578 			bw_share[i] = 1;
5579 	}
5580 
5581 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5582 	if (ret) {
5583 		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5584 
5585 		dev_info(&pf->pdev->dev,
5586 			 "Failed configuring TC map %d for VSI %d\n",
5587 			 enabled_tc, vsi->seid);
5588 		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5589 						  &bw_config, NULL);
5590 		if (ret) {
5591 			dev_info(&pf->pdev->dev,
5592 				 "Failed querying vsi bw info, err %s aq_err %s\n",
5593 				 i40e_stat_str(hw, ret),
5594 				 i40e_aq_str(hw, hw->aq.asq_last_status));
5595 			goto out;
5596 		}
5597 		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5598 			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5599 
5600 			if (!valid_tc)
5601 				valid_tc = bw_config.tc_valid_bits;
5602 			/* Always enable TC0, no matter what */
5603 			valid_tc |= 1;
5604 			dev_info(&pf->pdev->dev,
5605 				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5606 				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5607 			enabled_tc = valid_tc;
5608 		}
5609 
5610 		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5611 		if (ret) {
5612 			dev_err(&pf->pdev->dev,
				"Unable to configure TC map %d for VSI %d\n",
5614 				enabled_tc, vsi->seid);
5615 			goto out;
5616 		}
5617 	}
5618 
5619 	/* Update Queue Pairs Mapping for currently enabled UPs */
5620 	ctxt.seid = vsi->seid;
5621 	ctxt.pf_num = vsi->back->hw.pf_id;
5622 	ctxt.vf_num = 0;
5623 	ctxt.uplink_seid = vsi->uplink_seid;
5624 	ctxt.info = vsi->info;
5625 	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5626 		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5627 		if (ret)
5628 			goto out;
5629 	} else {
5630 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5631 	}
5632 
5633 	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5634 	 * queues changed.
5635 	 */
5636 	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5637 		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5638 				      vsi->num_queue_pairs);
5639 		ret = i40e_vsi_config_rss(vsi);
5640 		if (ret) {
5641 			dev_info(&vsi->back->pdev->dev,
5642 				 "Failed to reconfig rss for num_queues\n");
5643 			return ret;
5644 		}
5645 		vsi->reconfig_rss = false;
5646 	}
5647 	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5648 		ctxt.info.valid_sections |=
5649 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5650 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5651 	}
5652 
5653 	/* Update the VSI after updating the VSI queue-mapping
5654 	 * information
5655 	 */
5656 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5657 	if (ret) {
5658 		dev_info(&pf->pdev->dev,
5659 			 "Update vsi tc config failed, err %s aq_err %s\n",
5660 			 i40e_stat_str(hw, ret),
5661 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5662 		goto out;
5663 	}
5664 	/* update the local VSI info with updated queue map */
5665 	i40e_vsi_update_queue_map(vsi, &ctxt);
5666 	vsi->info.valid_sections = 0;
5667 
5668 	/* Update current VSI BW information */
5669 	ret = i40e_vsi_get_bw_info(vsi);
5670 	if (ret) {
5671 		dev_info(&pf->pdev->dev,
5672 			 "Failed updating vsi bw info, err %s aq_err %s\n",
5673 			 i40e_stat_str(hw, ret),
5674 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5675 		goto out;
5676 	}
5677 
5678 	/* Update the netdev TC setup */
5679 	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5680 out:
5681 	return ret;
5682 }
5683 
5684 /**
5685  * i40e_get_link_speed - Returns link speed for the interface
5686  * @vsi: VSI to be configured
5687  *
5688  **/
5689 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5690 {
5691 	struct i40e_pf *pf = vsi->back;
5692 
5693 	switch (pf->hw.phy.link_info.link_speed) {
5694 	case I40E_LINK_SPEED_40GB:
5695 		return 40000;
5696 	case I40E_LINK_SPEED_25GB:
5697 		return 25000;
5698 	case I40E_LINK_SPEED_20GB:
5699 		return 20000;
5700 	case I40E_LINK_SPEED_10GB:
5701 		return 10000;
5702 	case I40E_LINK_SPEED_1GB:
5703 		return 1000;
5704 	default:
5705 		return -EINVAL;
5706 	}
5707 }
5708 
5709 /**
5710  * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5711  * @vsi: VSI to be configured
5712  * @seid: seid of the channel/VSI
5713  * @max_tx_rate: max TX rate to be configured as BW limit
5714  *
5715  * Helper function to set BW limit for a given VSI
5716  **/
5717 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5718 {
5719 	struct i40e_pf *pf = vsi->back;
5720 	u64 credits = 0;
5721 	int speed = 0;
5722 	int ret = 0;
5723 
5724 	speed = i40e_get_link_speed(vsi);
5725 	if (max_tx_rate > speed) {
5726 		dev_err(&pf->pdev->dev,
5727 			"Invalid max tx rate %llu specified for VSI seid %d.",
5728 			max_tx_rate, seid);
5729 		return -EINVAL;
5730 	}
5731 	if (max_tx_rate && max_tx_rate < 50) {
5732 		dev_warn(&pf->pdev->dev,
5733 			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5734 		max_tx_rate = 50;
5735 	}
5736 
5737 	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
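	/* e.g. max_tx_rate = 1000 Mbps -> 1000 / 50 = 20 credits */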
5738 	credits = max_tx_rate;
5739 	do_div(credits, I40E_BW_CREDIT_DIVISOR);
5740 	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5741 					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5742 	if (ret)
5743 		dev_err(&pf->pdev->dev,
5744 			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5745 			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5746 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5747 	return ret;
5748 }
5749 
5750 /**
5751  * i40e_remove_queue_channels - Remove queue channels for the TCs
5752  * @vsi: VSI to be configured
5753  *
5754  * Remove queue channels for the TCs
5755  **/
5756 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5757 {
5758 	enum i40e_admin_queue_err last_aq_status;
5759 	struct i40e_cloud_filter *cfilter;
5760 	struct i40e_channel *ch, *ch_tmp;
5761 	struct i40e_pf *pf = vsi->back;
5762 	struct hlist_node *node;
5763 	int ret, i;
5764 
5765 	/* Reset rss size that was stored when reconfiguring rss for
5766 	 * channel VSIs with non-power-of-2 queue count.
5767 	 */
5768 	vsi->current_rss_size = 0;
5769 
5770 	/* perform cleanup for channels if they exist */
5771 	if (list_empty(&vsi->ch_list))
5772 		return;
5773 
5774 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5775 		struct i40e_vsi *p_vsi;
5776 
5777 		list_del(&ch->list);
5778 		p_vsi = ch->parent_vsi;
5779 		if (!p_vsi || !ch->initialized) {
5780 			kfree(ch);
5781 			continue;
5782 		}
5783 		/* Reset queue contexts */
5784 		for (i = 0; i < ch->num_queue_pairs; i++) {
5785 			struct i40e_ring *tx_ring, *rx_ring;
5786 			u16 pf_q;
5787 
5788 			pf_q = ch->base_queue + i;
5789 			tx_ring = vsi->tx_rings[pf_q];
5790 			tx_ring->ch = NULL;
5791 
5792 			rx_ring = vsi->rx_rings[pf_q];
5793 			rx_ring->ch = NULL;
5794 		}
5795 
5796 		/* Reset BW configured for this VSI via mqprio */
5797 		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5798 		if (ret)
5799 			dev_info(&vsi->back->pdev->dev,
5800 				 "Failed to reset tx rate for ch->seid %u\n",
5801 				 ch->seid);
5802 
5803 		/* delete cloud filters associated with this channel */
5804 		hlist_for_each_entry_safe(cfilter, node,
5805 					  &pf->cloud_filter_list, cloud_node) {
5806 			if (cfilter->seid != ch->seid)
5807 				continue;
5808 
5809 			hash_del(&cfilter->cloud_node);
5810 			if (cfilter->dst_port)
5811 				ret = i40e_add_del_cloud_filter_big_buf(vsi,
5812 									cfilter,
5813 									false);
5814 			else
5815 				ret = i40e_add_del_cloud_filter(vsi, cfilter,
5816 								false);
5817 			last_aq_status = pf->hw.aq.asq_last_status;
5818 			if (ret)
5819 				dev_info(&pf->pdev->dev,
5820 					 "Failed to delete cloud filter, err %s aq_err %s\n",
5821 					 i40e_stat_str(&pf->hw, ret),
5822 					 i40e_aq_str(&pf->hw, last_aq_status));
5823 			kfree(cfilter);
5824 		}
5825 
5826 		/* delete VSI from FW */
5827 		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5828 					     NULL);
5829 		if (ret)
5830 			dev_err(&vsi->back->pdev->dev,
5831 				"unable to remove channel (%d) for parent VSI(%d)\n",
5832 				ch->seid, p_vsi->seid);
5833 		kfree(ch);
5834 	}
5835 	INIT_LIST_HEAD(&vsi->ch_list);
5836 }
5837 
5838 /**
5839  * i40e_get_max_queues_for_channel
 * @vsi: ptr to the VSI with which the channels are associated
5841  *
5842  * Helper function which returns max value among the queue counts set on the
5843  * channels/TCs created.
5844  **/
5845 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5846 {
5847 	struct i40e_channel *ch, *ch_tmp;
5848 	int max = 0;
5849 
5850 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5851 		if (!ch->initialized)
5852 			continue;
5853 		if (ch->num_queue_pairs > max)
5854 			max = ch->num_queue_pairs;
5855 	}
5856 
5857 	return max;
5858 }
5859 
5860 /**
5861  * i40e_validate_num_queues - validate num_queues w.r.t channel
5862  * @pf: ptr to PF device
5863  * @num_queues: number of queues
5864  * @vsi: the parent VSI
 * @reconfig_rss: indicates whether RSS should be reconfigured or not
5866  *
5867  * This function validates number of queues in the context of new channel
5868  * which is being established and determines if RSS should be reconfigured
5869  * or not for parent VSI.
5870  **/
5871 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5872 				    struct i40e_vsi *vsi, bool *reconfig_rss)
5873 {
5874 	int max_ch_queues;
5875 
5876 	if (!reconfig_rss)
5877 		return -EINVAL;
5878 
5879 	*reconfig_rss = false;
5880 	if (vsi->current_rss_size) {
5881 		if (num_queues > vsi->current_rss_size) {
5882 			dev_dbg(&pf->pdev->dev,
5883 				"Error: num_queues (%d) > vsi's current_size(%d)\n",
5884 				num_queues, vsi->current_rss_size);
5885 			return -EINVAL;
5886 		} else if ((num_queues < vsi->current_rss_size) &&
5887 			   (!is_power_of_2(num_queues))) {
5888 			dev_dbg(&pf->pdev->dev,
5889 				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5890 				num_queues, vsi->current_rss_size);
5891 			return -EINVAL;
5892 		}
5893 	}
5894 
5895 	if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured on existing channels,
		 * if any, and enforce 'num_queues' to be at least that
		 * maximum.
		 */
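		/* e.g. a channel already uses 4 queues: num_queues = 6 passes, 3 is rejected */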
5901 		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5902 		if (num_queues < max_ch_queues) {
5903 			dev_dbg(&pf->pdev->dev,
5904 				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
5905 				num_queues, max_ch_queues);
5906 			return -EINVAL;
5907 		}
5908 		*reconfig_rss = true;
5909 	}
5910 
5911 	return 0;
5912 }
5913 
5914 /**
5915  * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5916  * @vsi: the VSI being setup
 * @rss_size: size of RSS; the LUT gets reprogrammed accordingly
5918  *
5919  * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5920  **/
5921 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5922 {
5923 	struct i40e_pf *pf = vsi->back;
5924 	u8 seed[I40E_HKEY_ARRAY_SIZE];
5925 	struct i40e_hw *hw = &pf->hw;
5926 	int local_rss_size;
5927 	u8 *lut;
5928 	int ret;
5929 
5930 	if (!vsi->rss_size)
5931 		return -EINVAL;
5932 
5933 	if (rss_size > vsi->rss_size)
5934 		return -EINVAL;
5935 
5936 	local_rss_size = min_t(int, vsi->rss_size, rss_size);
5937 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5938 	if (!lut)
5939 		return -ENOMEM;
5940 
5941 	/* Ignoring user configured lut if there is one */
5942 	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5943 
5944 	/* Use user configured hash key if there is one, otherwise
5945 	 * use default.
5946 	 */
5947 	if (vsi->rss_hkey_user)
5948 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5949 	else
5950 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5951 
5952 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5953 	if (ret) {
5954 		dev_info(&pf->pdev->dev,
5955 			 "Cannot set RSS lut, err %s aq_err %s\n",
5956 			 i40e_stat_str(hw, ret),
5957 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5958 		kfree(lut);
5959 		return ret;
5960 	}
5961 	kfree(lut);
5962 
5963 	/* Do the update w.r.t. storing rss_size */
5964 	if (!vsi->orig_rss_size)
5965 		vsi->orig_rss_size = vsi->rss_size;
5966 	vsi->current_rss_size = local_rss_size;
5967 
5968 	return ret;
5969 }
5970 
5971 /**
5972  * i40e_channel_setup_queue_map - Setup a channel queue map
5973  * @pf: ptr to PF device
5974  * @ctxt: VSI context structure
5975  * @ch: ptr to channel structure
5976  *
5977  * Setup queue map for a specific channel
5978  **/
5979 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5980 					 struct i40e_vsi_context *ctxt,
5981 					 struct i40e_channel *ch)
5982 {
5983 	u16 qcount, qmap, sections = 0;
5984 	u8 offset = 0;
5985 	int pow;
5986 
5987 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5988 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5989 
5990 	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5991 	ch->num_queue_pairs = qcount;
5992 
5993 	/* find the next higher power-of-2 of num queue pairs */
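	/* e.g. qcount = 6 -> ilog2(6) = 2, not a power of 2, so pow = 3 (maps 8 queues) */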
5994 	pow = ilog2(qcount);
5995 	if (!is_power_of_2(qcount))
5996 		pow++;
5997 
5998 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5999 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
6000 
6001 	/* Setup queue TC[0].qmap for given VSI context */
6002 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
6003 
6004 	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
6005 	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6006 	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6007 	ctxt->info.valid_sections |= cpu_to_le16(sections);
6008 }
6009 
6010 /**
6011  * i40e_add_channel - add a channel by adding VSI
6012  * @pf: ptr to PF device
6013  * @uplink_seid: underlying HW switching element (VEB) ID
6014  * @ch: ptr to channel structure
6015  *
6016  * Add a channel (VSI) using add_vsi and queue_map
6017  **/
6018 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6019 			    struct i40e_channel *ch)
6020 {
6021 	struct i40e_hw *hw = &pf->hw;
6022 	struct i40e_vsi_context ctxt;
6023 	u8 enabled_tc = 0x1; /* TC0 enabled */
6024 	int ret;
6025 
6026 	if (ch->type != I40E_VSI_VMDQ2) {
6027 		dev_info(&pf->pdev->dev,
6028 			 "add new vsi failed, ch->type %d\n", ch->type);
6029 		return -EINVAL;
6030 	}
6031 
6032 	memset(&ctxt, 0, sizeof(ctxt));
6033 	ctxt.pf_num = hw->pf_id;
6034 	ctxt.vf_num = 0;
6035 	ctxt.uplink_seid = uplink_seid;
6036 	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6037 	if (ch->type == I40E_VSI_VMDQ2)
6038 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6039 
6040 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
6041 		ctxt.info.valid_sections |=
6042 		     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6043 		ctxt.info.switch_id =
6044 		   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6045 	}
6046 
6047 	/* Set queue map for a given VSI context */
6048 	i40e_channel_setup_queue_map(pf, &ctxt, ch);
6049 
6050 	/* Now time to create VSI */
6051 	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6052 	if (ret) {
6053 		dev_info(&pf->pdev->dev,
6054 			 "add new vsi failed, err %s aq_err %s\n",
6055 			 i40e_stat_str(&pf->hw, ret),
6056 			 i40e_aq_str(&pf->hw,
6057 				     pf->hw.aq.asq_last_status));
6058 		return -ENOENT;
6059 	}
6060 
6061 	/* Success, update channel, set enabled_tc only if the channel
6062 	 * is not a macvlan
6063 	 */
6064 	ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6065 	ch->seid = ctxt.seid;
6066 	ch->vsi_number = ctxt.vsi_number;
6067 	ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
6068 
6069 	/* copy just the sections touched not the entire info
6070 	 * since not all sections are valid as returned by
6071 	 * update vsi params
6072 	 */
6073 	ch->info.mapping_flags = ctxt.info.mapping_flags;
6074 	memcpy(&ch->info.queue_mapping,
6075 	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6076 	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6077 	       sizeof(ctxt.info.tc_mapping));
6078 
6079 	return 0;
6080 }
6081 
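/**
 * i40e_channel_config_bw - configure BW for the channel VSI
 * @vsi: ptr to the parent VSI
 * @ch: ptr to channel structure
 * @bw_share: BW shared credits per TC
 *
 * Configure the per-TC BW allocation for the channel (VSI) identified by
 * ch->seid.
 **/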
6082 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6083 				  u8 *bw_share)
6084 {
6085 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6086 	i40e_status ret;
6087 	int i;
6088 
6089 	memset(&bw_data, 0, sizeof(bw_data));
6090 	bw_data.tc_valid_bits = ch->enabled_tc;
6091 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6092 		bw_data.tc_bw_credits[i] = bw_share[i];
6093 
6094 	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6095 				       &bw_data, NULL);
6096 	if (ret) {
6097 		dev_info(&vsi->back->pdev->dev,
6098 			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6099 			 vsi->back->hw.aq.asq_last_status, ch->seid);
6100 		return -EINVAL;
6101 	}
6102 
6103 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6104 		ch->info.qs_handle[i] = bw_data.qs_handles[i];
6105 
6106 	return 0;
6107 }
6108 
6109 /**
6110  * i40e_channel_config_tx_ring - config TX ring associated with new channel
6111  * @pf: ptr to PF device
6112  * @vsi: the VSI being setup
6113  * @ch: ptr to channel structure
6114  *
 * Configure TX rings associated with the channel (VSI), since the queues
 * are being taken from the parent VSI.
6117  **/
6118 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6119 				       struct i40e_vsi *vsi,
6120 				       struct i40e_channel *ch)
6121 {
6122 	i40e_status ret;
6123 	int i;
6124 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6125 
6126 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
6127 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6128 		if (ch->enabled_tc & BIT(i))
6129 			bw_share[i] = 1;
6130 	}
6131 
6132 	/* configure BW for new VSI */
6133 	ret = i40e_channel_config_bw(vsi, ch, bw_share);
6134 	if (ret) {
6135 		dev_info(&vsi->back->pdev->dev,
6136 			 "Failed configuring TC map %d for channel (seid %u)\n",
6137 			 ch->enabled_tc, ch->seid);
6138 		return ret;
6139 	}
6140 
6141 	for (i = 0; i < ch->num_queue_pairs; i++) {
6142 		struct i40e_ring *tx_ring, *rx_ring;
6143 		u16 pf_q;
6144 
6145 		pf_q = ch->base_queue + i;
6146 
6147 		/* Get to TX ring ptr of main VSI, for re-setup TX queue
6148 		 * context
6149 		 */
6150 		tx_ring = vsi->tx_rings[pf_q];
6151 		tx_ring->ch = ch;
6152 
6153 		/* Get the RX ring ptr */
6154 		rx_ring = vsi->rx_rings[pf_q];
6155 		rx_ring->ch = ch;
6156 	}
6157 
6158 	return 0;
6159 }
6160 
6161 /**
6162  * i40e_setup_hw_channel - setup new channel
6163  * @pf: ptr to PF device
6164  * @vsi: the VSI being setup
6165  * @ch: ptr to channel structure
6166  * @uplink_seid: underlying HW switching element (VEB) ID
6167  * @type: type of channel to be created (VMDq2/VF)
6168  *
6169  * Setup new channel (VSI) based on specified type (VMDq2/VF)
6170  * and configures TX rings accordingly
6171  **/
6172 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6173 					struct i40e_vsi *vsi,
6174 					struct i40e_channel *ch,
6175 					u16 uplink_seid, u8 type)
6176 {
6177 	int ret;
6178 
6179 	ch->initialized = false;
6180 	ch->base_queue = vsi->next_base_queue;
6181 	ch->type = type;
6182 
6183 	/* Proceed with creation of channel (VMDq2) VSI */
6184 	ret = i40e_add_channel(pf, uplink_seid, ch);
6185 	if (ret) {
6186 		dev_info(&pf->pdev->dev,
6187 			 "failed to add_channel using uplink_seid %u\n",
6188 			 uplink_seid);
6189 		return ret;
6190 	}
6191 
6192 	/* Mark the successful creation of channel */
6193 	ch->initialized = true;
6194 
6195 	/* Reconfigure TX queues using QTX_CTL register */
6196 	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6197 	if (ret) {
6198 		dev_info(&pf->pdev->dev,
6199 			 "failed to configure TX rings for channel %u\n",
6200 			 ch->seid);
6201 		return ret;
6202 	}
6203 
6204 	/* update 'next_base_queue' */
6205 	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6206 	dev_dbg(&pf->pdev->dev,
6207 		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6208 		ch->seid, ch->vsi_number, ch->stat_counter_idx,
6209 		ch->num_queue_pairs,
6210 		vsi->next_base_queue);
6211 	return ret;
6212 }
6213 
6214 /**
6215  * i40e_setup_channel - setup new channel using uplink element
6216  * @pf: ptr to PF device
6217  * @vsi: pointer to the VSI to set up the channel within
6218  * @ch: ptr to channel structure
6219  *
6220  * Setup new channel (VSI) based on specified type (VMDq2/VF)
6221  * and uplink switching element (uplink_seid)
6222  **/
6223 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6224 			       struct i40e_channel *ch)
6225 {
6226 	u8 vsi_type;
6227 	u16 seid;
6228 	int ret;
6229 
6230 	if (vsi->type == I40E_VSI_MAIN) {
6231 		vsi_type = I40E_VSI_VMDQ2;
6232 	} else {
6233 		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6234 			vsi->type);
6235 		return false;
6236 	}
6237 
6238 	/* underlying switching element */
6239 	seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6240 
6241 	/* create channel (VSI), configure TX rings */
6242 	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6243 	if (ret) {
6244 		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6245 		return false;
6246 	}
6247 
	return ch->initialized;
6249 }
6250 
6251 /**
6252  * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6253  * @vsi: ptr to VSI which has PF backing
6254  *
 * Sets up the switch mode correctly if it needs to be changed, restricting
 * it to the allowed modes.
6257  **/
6258 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6259 {
6260 	u8 mode;
6261 	struct i40e_pf *pf = vsi->back;
6262 	struct i40e_hw *hw = &pf->hw;
6263 	int ret;
6264 
6265 	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6266 	if (ret)
6267 		return -EINVAL;
6268 
6269 	if (hw->dev_caps.switch_mode) {
6270 		/* if switch mode is set, support mode2 (non-tunneled for
6271 		 * cloud filter) for now
6272 		 */
6273 		u32 switch_mode = hw->dev_caps.switch_mode &
6274 				  I40E_SWITCH_MODE_MASK;
6275 		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6276 			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6277 				return 0;
6278 			dev_err(&pf->pdev->dev,
6279 				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6280 				hw->dev_caps.switch_mode);
6281 			return -EINVAL;
6282 		}
6283 	}
6284 
6285 	/* Set Bit 7 to be valid */
6286 	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6287 
6288 	/* Set L4type for TCP support */
6289 	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6290 
6291 	/* Set cloud filter mode */
6292 	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6293 
6294 	/* Prep mode field for set_switch_config */
6295 	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6296 					pf->last_sw_conf_valid_flags,
6297 					mode, NULL);
6298 	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6299 		dev_err(&pf->pdev->dev,
6300 			"couldn't set switch config bits, err %s aq_err %s\n",
6301 			i40e_stat_str(hw, ret),
6302 			i40e_aq_str(hw,
6303 				    hw->aq.asq_last_status));
6304 
6305 	return ret;
6306 }
6307 
6308 /**
6309  * i40e_create_queue_channel - function to create channel
6310  * @vsi: VSI to be configured
6311  * @ch: ptr to channel (it contains channel specific params)
6312  *
6313  * This function creates channel (VSI) using num_queues specified by user,
6314  * reconfigs RSS if needed.
6315  **/
6316 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6317 			      struct i40e_channel *ch)
6318 {
6319 	struct i40e_pf *pf = vsi->back;
6320 	bool reconfig_rss;
6321 	int err;
6322 
6323 	if (!ch)
6324 		return -EINVAL;
6325 
6326 	if (!ch->num_queue_pairs) {
6327 		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6328 			ch->num_queue_pairs);
6329 		return -EINVAL;
6330 	}
6331 
6332 	/* validate user requested num_queues for channel */
6333 	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6334 				       &reconfig_rss);
6335 	if (err) {
6336 		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6337 			 ch->num_queue_pairs);
6338 		return -EINVAL;
6339 	}
6340 
	/* By default we are in VEPA mode; if this is the first VF/VMDq
	 * VSI to be added, switch to VEB mode.
	 */
6344 
6345 	if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6346 		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6347 
6348 		if (vsi->type == I40E_VSI_MAIN) {
6349 			if (pf->flags & I40E_FLAG_TC_MQPRIO)
6350 				i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6351 			else
6352 				i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
6353 		}
		/* From now on, the number of queues for the main VSI will be
		 * the value of TC0's queue count
		 */
6357 	}
6358 
	/* By this time, vsi->cnt_q_avail should be non-zero and at least
	 * num_queues
	 */
6362 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6363 		dev_dbg(&pf->pdev->dev,
6364 			"Error: cnt_q_avail (%u) less than num_queues %d\n",
6365 			vsi->cnt_q_avail, ch->num_queue_pairs);
6366 		return -EINVAL;
6367 	}
6368 
6369 	/* reconfig_rss only if vsi type is MAIN_VSI */
6370 	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6371 		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6372 		if (err) {
6373 			dev_info(&pf->pdev->dev,
6374 				 "Error: unable to reconfig rss for num_queues (%u)\n",
6375 				 ch->num_queue_pairs);
6376 			return -EINVAL;
6377 		}
6378 	}
6379 
6380 	if (!i40e_setup_channel(pf, vsi, ch)) {
6381 		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6382 		return -EINVAL;
6383 	}
6384 
6385 	dev_info(&pf->pdev->dev,
6386 		 "Setup channel (id:%u) utilizing num_queues %d\n",
6387 		 ch->seid, ch->num_queue_pairs);
6388 
6389 	/* configure VSI for BW limit */
6390 	if (ch->max_tx_rate) {
6391 		u64 credits = ch->max_tx_rate;
6392 
6393 		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6394 			return -EINVAL;
6395 
6396 		do_div(credits, I40E_BW_CREDIT_DIVISOR);
6397 		dev_dbg(&pf->pdev->dev,
6398 			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6399 			ch->max_tx_rate,
6400 			credits,
6401 			ch->seid);
6402 	}
6403 
6404 	/* in case of VF, this will be main SRIOV VSI */
6405 	ch->parent_vsi = vsi;
6406 
6407 	/* and update main_vsi's count for queue_available to use */
6408 	vsi->cnt_q_avail -= ch->num_queue_pairs;
6409 
6410 	return 0;
6411 }
6412 
6413 /**
6414  * i40e_configure_queue_channels - Add queue channel for the given TCs
6415  * @vsi: VSI to be configured
6416  *
6417  * Configures queue channel mapping to the given TCs
6418  **/
6419 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6420 {
6421 	struct i40e_channel *ch;
6422 	u64 max_rate = 0;
6423 	int ret = 0, i;
6424 
6425 	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6426 	vsi->tc_seid_map[0] = vsi->seid;
6427 	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6428 		if (vsi->tc_config.enabled_tc & BIT(i)) {
6429 			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6430 			if (!ch) {
6431 				ret = -ENOMEM;
6432 				goto err_free;
6433 			}
6434 
6435 			INIT_LIST_HEAD(&ch->list);
6436 			ch->num_queue_pairs =
6437 				vsi->tc_config.tc_info[i].qcount;
6438 			ch->base_queue =
6439 				vsi->tc_config.tc_info[i].qoffset;
6440 
6441 			/* Bandwidth limit through tc interface is in bytes/s,
6442 			 * change to Mbit/s
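			 * (e.g. 125000000 bytes/s becomes 1000 Mbit/s)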
6443 			 */
6444 			max_rate = vsi->mqprio_qopt.max_rate[i];
6445 			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6446 			ch->max_tx_rate = max_rate;
6447 
6448 			list_add_tail(&ch->list, &vsi->ch_list);
6449 
6450 			ret = i40e_create_queue_channel(vsi, ch);
6451 			if (ret) {
6452 				dev_err(&vsi->back->pdev->dev,
6453 					"Failed creating queue channel with TC%d: queues %d\n",
6454 					i, ch->num_queue_pairs);
6455 				goto err_free;
6456 			}
6457 			vsi->tc_seid_map[i] = ch->seid;
6458 		}
6459 	}
6460 	return ret;
6461 
6462 err_free:
6463 	i40e_remove_queue_channels(vsi);
6464 	return ret;
6465 }
6466 
6467 /**
6468  * i40e_veb_config_tc - Configure TCs for given VEB
6469  * @veb: given VEB
6470  * @enabled_tc: TC bitmap
6471  *
6472  * Configures given TC bitmap for VEB (switching) element
6473  **/
6474 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6475 {
6476 	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6477 	struct i40e_pf *pf = veb->pf;
6478 	int ret = 0;
6479 	int i;
6480 
6481 	/* No TCs or already enabled TCs just return */
6482 	if (!enabled_tc || veb->enabled_tc == enabled_tc)
6483 		return ret;
6484 
6485 	bw_data.tc_valid_bits = enabled_tc;
6486 	/* bw_data.absolute_credits is not set (relative) */
6487 
6488 	/* Enable ETS TCs with equal BW Share for now */
6489 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6490 		if (enabled_tc & BIT(i))
6491 			bw_data.tc_bw_share_credits[i] = 1;
6492 	}
6493 
6494 	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6495 						   &bw_data, NULL);
6496 	if (ret) {
6497 		dev_info(&pf->pdev->dev,
6498 			 "VEB bw config failed, err %s aq_err %s\n",
6499 			 i40e_stat_str(&pf->hw, ret),
6500 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6501 		goto out;
6502 	}
6503 
6504 	/* Update the BW information */
6505 	ret = i40e_veb_get_bw_info(veb);
6506 	if (ret) {
6507 		dev_info(&pf->pdev->dev,
6508 			 "Failed getting veb bw config, err %s aq_err %s\n",
6509 			 i40e_stat_str(&pf->hw, ret),
6510 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6511 	}
6512 
6513 out:
6514 	return ret;
6515 }
6516 
6517 #ifdef CONFIG_I40E_DCB
6518 /**
6519  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6520  * @pf: PF struct
6521  *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling
 * this function
6525  **/
6526 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6527 {
6528 	u8 tc_map = 0;
6529 	int ret;
6530 	u8 v;
6531 
6532 	/* Enable the TCs available on PF to all VEBs */
6533 	tc_map = i40e_pf_get_tc_map(pf);
6534 	if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6535 		return;
6536 
6537 	for (v = 0; v < I40E_MAX_VEB; v++) {
6538 		if (!pf->veb[v])
6539 			continue;
6540 		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6541 		if (ret) {
6542 			dev_info(&pf->pdev->dev,
6543 				 "Failed configuring TC for VEB seid=%d\n",
6544 				 pf->veb[v]->seid);
			/* Will try to configure as many components as possible */
6546 		}
6547 	}
6548 
6549 	/* Update each VSI */
6550 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6551 		if (!pf->vsi[v])
6552 			continue;
6553 
6554 		/* - Enable all TCs for the LAN VSI
6555 		 * - For all others keep them at TC0 for now
6556 		 */
6557 		if (v == pf->lan_vsi)
6558 			tc_map = i40e_pf_get_tc_map(pf);
6559 		else
6560 			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6561 
6562 		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6563 		if (ret) {
6564 			dev_info(&pf->pdev->dev,
6565 				 "Failed configuring TC for VSI seid=%d\n",
6566 				 pf->vsi[v]->seid);
			/* Will try to configure as many components as possible */
6568 		} else {
6569 			/* Re-configure VSI vectors based on updated TC map */
6570 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6571 			if (pf->vsi[v]->netdev)
6572 				i40e_dcbnl_set_all(pf->vsi[v]);
6573 		}
6574 	}
6575 }
6576 
6577 /**
6578  * i40e_resume_port_tx - Resume port Tx
6579  * @pf: PF struct
6580  *
6581  * Resume a port's Tx and issue a PF reset in case of failure to
6582  * resume.
6583  **/
6584 static int i40e_resume_port_tx(struct i40e_pf *pf)
6585 {
6586 	struct i40e_hw *hw = &pf->hw;
6587 	int ret;
6588 
6589 	ret = i40e_aq_resume_port_tx(hw, NULL);
6590 	if (ret) {
6591 		dev_info(&pf->pdev->dev,
6592 			 "Resume Port Tx failed, err %s aq_err %s\n",
6593 			  i40e_stat_str(&pf->hw, ret),
6594 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6595 		/* Schedule PF reset to recover */
6596 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6597 		i40e_service_event_schedule(pf);
6598 	}
6599 
6600 	return ret;
6601 }
6602 
6603 /**
6604  * i40e_suspend_port_tx - Suspend port Tx
6605  * @pf: PF struct
6606  *
6607  * Suspend a port's Tx and issue a PF reset in case of failure.
6608  **/
6609 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6610 {
6611 	struct i40e_hw *hw = &pf->hw;
6612 	int ret;
6613 
6614 	ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6615 	if (ret) {
6616 		dev_info(&pf->pdev->dev,
6617 			 "Suspend Port Tx failed, err %s aq_err %s\n",
6618 			 i40e_stat_str(&pf->hw, ret),
6619 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6620 		/* Schedule PF reset to recover */
6621 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6622 		i40e_service_event_schedule(pf);
6623 	}
6624 
6625 	return ret;
6626 }
6627 
6628 /**
6629  * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6630  * @pf: PF being configured
6631  * @new_cfg: New DCBX configuration
6632  *
6633  * Program DCB settings into HW and reconfigure VEB/VSIs on
6634  * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6635  **/
6636 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6637 				  struct i40e_dcbx_config *new_cfg)
6638 {
6639 	struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6640 	int ret;
6641 
6642 	/* Check if need reconfiguration */
	if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6644 		dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6645 		return 0;
6646 	}
6647 
	/* Config change: disable all VSIs */
6649 	i40e_pf_quiesce_all_vsi(pf);
6650 
6651 	/* Copy the new config to the current config */
6652 	*old_cfg = *new_cfg;
6653 	old_cfg->etsrec = old_cfg->etscfg;
6654 	ret = i40e_set_dcb_config(&pf->hw);
6655 	if (ret) {
6656 		dev_info(&pf->pdev->dev,
6657 			 "Set DCB Config failed, err %s aq_err %s\n",
6658 			 i40e_stat_str(&pf->hw, ret),
6659 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6660 		goto out;
6661 	}
6662 
6663 	/* Changes in configuration update VEB/VSI */
6664 	i40e_dcb_reconfigure(pf);
6665 out:
6666 	/* In case of reset do not try to resume anything */
6667 	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6668 		/* Re-start the VSIs if disabled */
6669 		ret = i40e_resume_port_tx(pf);
6670 		/* In case of error no point in resuming VSIs */
6671 		if (ret)
6672 			goto err;
6673 		i40e_pf_unquiesce_all_vsi(pf);
6674 	}
6675 err:
6676 	return ret;
6677 }
6678 
6679 /**
6680  * i40e_hw_dcb_config - Program new DCBX settings into HW
6681  * @pf: PF being configured
6682  * @new_cfg: New DCBX configuration
6683  *
6684  * Program DCB settings into HW and reconfigure VEB/VSIs on
6685  * given PF
6686  **/
6687 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6688 {
6689 	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6690 	u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6691 	u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6692 	struct i40e_dcbx_config *old_cfg;
6693 	u8 mode[I40E_MAX_TRAFFIC_CLASS];
6694 	struct i40e_rx_pb_config pb_cfg;
6695 	struct i40e_hw *hw = &pf->hw;
6696 	u8 num_ports = hw->num_ports;
6697 	bool need_reconfig;
6698 	int ret = -EINVAL;
6699 	u8 lltc_map = 0;
6700 	u8 tc_map = 0;
6701 	u8 new_numtc;
6702 	u8 i;
6703 
6704 	dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6705 	/* Un-pack information to Program ETS HW via shared API
6706 	 * numtc, tcmap
6707 	 * LLTC map
6708 	 * ETS/NON-ETS arbiter mode
6709 	 * max exponent (credit refills)
6710 	 * Total number of ports
6711 	 * PFC priority bit-map
6712 	 * Priority Table
6713 	 * BW % per TC
6714 	 * Arbiter mode between UPs sharing same TC
6715 	 * TSA table (ETS or non-ETS)
6716 	 * EEE enabled or not
6717 	 * MFS TC table
6718 	 */
6719 
6720 	new_numtc = i40e_dcb_get_num_tc(new_cfg);
6721 
6722 	memset(&ets_data, 0, sizeof(ets_data));
6723 	for (i = 0; i < new_numtc; i++) {
6724 		tc_map |= BIT(i);
6725 		switch (new_cfg->etscfg.tsatable[i]) {
6726 		case I40E_IEEE_TSA_ETS:
6727 			prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6728 			ets_data.tc_bw_share_credits[i] =
6729 					new_cfg->etscfg.tcbwtable[i];
6730 			break;
6731 		case I40E_IEEE_TSA_STRICT:
6732 			prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6733 			lltc_map |= BIT(i);
6734 			ets_data.tc_bw_share_credits[i] =
6735 					I40E_DCB_STRICT_PRIO_CREDITS;
6736 			break;
6737 		default:
6738 			/* Invalid TSA type */
6739 			need_reconfig = false;
6740 			goto out;
6741 		}
6742 	}
6743 
6744 	old_cfg = &hw->local_dcbx_config;
6745 	/* Check if need reconfiguration */
6746 	need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6747 
6748 	/* If needed, enable/disable frame tagging, disable all VSIs
6749 	 * and suspend port tx
6750 	 */
6751 	if (need_reconfig) {
6752 		/* Enable DCB tagging only when more than one TC */
6753 		if (new_numtc > 1)
6754 			pf->flags |= I40E_FLAG_DCB_ENABLED;
6755 		else
6756 			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6757 
6758 		set_bit(__I40E_PORT_SUSPENDED, pf->state);
		/* Reconfiguration needed; quiesce all VSIs */
6760 		i40e_pf_quiesce_all_vsi(pf);
6761 		ret = i40e_suspend_port_tx(pf);
6762 		if (ret)
6763 			goto err;
6764 	}
6765 
6766 	/* Configure Port ETS Tx Scheduler */
6767 	ets_data.tc_valid_bits = tc_map;
6768 	ets_data.tc_strict_priority_flags = lltc_map;
6769 	ret = i40e_aq_config_switch_comp_ets
6770 		(hw, pf->mac_seid, &ets_data,
6771 		 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6772 	if (ret) {
6773 		dev_info(&pf->pdev->dev,
6774 			 "Modify Port ETS failed, err %s aq_err %s\n",
6775 			 i40e_stat_str(&pf->hw, ret),
6776 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6777 		goto out;
6778 	}
6779 
6780 	/* Configure Rx ETS HW */
6781 	memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
6782 	i40e_dcb_hw_set_num_tc(hw, new_numtc);
6783 	i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
6784 				   I40E_DCB_ARB_MODE_STRICT_PRIORITY,
6785 				   I40E_DCB_DEFAULT_MAX_EXPONENT,
6786 				   lltc_map);
6787 	i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
6788 	i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
6789 				     prio_type);
6790 	i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
6791 			       new_cfg->etscfg.prioritytable);
6792 	i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
6793 
6794 	/* Configure Rx Packet Buffers in HW */
6795 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6796 		mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
6797 		mfs_tc[i] += I40E_PACKET_HDR_PAD;
6798 	}
6799 
6800 	i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
6801 					 false, new_cfg->pfc.pfcenable,
6802 					 mfs_tc, &pb_cfg);
6803 	i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
6804 
6805 	/* Update the local Rx Packet buffer config */
6806 	pf->pb_cfg = pb_cfg;
6807 
6808 	/* Inform the FW about changes to DCB configuration */
6809 	ret = i40e_aq_dcb_updated(&pf->hw, NULL);
6810 	if (ret) {
6811 		dev_info(&pf->pdev->dev,
6812 			 "DCB Updated failed, err %s aq_err %s\n",
6813 			 i40e_stat_str(&pf->hw, ret),
6814 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6815 		goto out;
6816 	}
6817 
6818 	/* Update the port DCBx configuration */
6819 	*old_cfg = *new_cfg;
6820 
6821 	/* Changes in configuration update VEB/VSI */
6822 	i40e_dcb_reconfigure(pf);
6823 out:
6824 	/* Re-start the VSIs if disabled */
6825 	if (need_reconfig) {
6826 		ret = i40e_resume_port_tx(pf);
6827 
6828 		clear_bit(__I40E_PORT_SUSPENDED, pf->state);
6829 		/* In case of error no point in resuming VSIs */
6830 		if (ret)
6831 			goto err;
6832 
6833 		/* Wait for the PF's queues to be disabled */
6834 		ret = i40e_pf_wait_queues_disabled(pf);
6835 		if (ret) {
6836 			/* Schedule PF reset to recover */
6837 			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6838 			i40e_service_event_schedule(pf);
6839 			goto err;
6840 		} else {
6841 			i40e_pf_unquiesce_all_vsi(pf);
6842 			set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6843 			set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
6844 		}
		/* registers are set, let's apply */
6846 		if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
6847 			ret = i40e_hw_set_dcb_config(pf, new_cfg);
6848 	}
6849 
6850 err:
6851 	return ret;
6852 }
6853 
6854 /**
6855  * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
6856  * @pf: PF being queried
6857  *
6858  * Set default DCB configuration in case DCB is to be done in SW.
6859  **/
6860 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
6861 {
6862 	struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
6863 	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6864 	struct i40e_hw *hw = &pf->hw;
6865 	int err;
6866 
6867 	if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
6868 		/* Update the local cached instance with TC0 ETS */
6869 		memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
6870 		pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6871 		pf->tmp_cfg.etscfg.maxtcs = 0;
6872 		pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6873 		pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
6874 		pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
6875 		pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
6876 		/* FW needs one App to configure HW */
6877 		pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
6878 		pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
6879 		pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
6880 		pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
6881 
6882 		return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
6883 	}
6884 
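	/* No SET_LLDP_MIB support: instead enable ETS for TC0 only directly
	 * on the physical port via the switching component ETS AQ command.
	 */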
6885 	memset(&ets_data, 0, sizeof(ets_data));
6886 	ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
6887 	ets_data.tc_strict_priority_flags = 0; /* ETS */
6888 	ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
6889 
6890 	/* Enable ETS on the Physical port */
6891 	err = i40e_aq_config_switch_comp_ets
6892 		(hw, pf->mac_seid, &ets_data,
6893 		 i40e_aqc_opc_enable_switching_comp_ets, NULL);
6894 	if (err) {
6895 		dev_info(&pf->pdev->dev,
6896 			 "Enable Port ETS failed, err %s aq_err %s\n",
6897 			 i40e_stat_str(&pf->hw, err),
6898 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6899 		err = -ENOENT;
6900 		goto out;
6901 	}
6902 
6903 	/* Update the local cached instance with TC0 ETS */
6904 	dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6905 	dcb_cfg->etscfg.cbs = 0;
6906 	dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
6907 	dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6908 
6909 out:
6910 	return err;
6911 }
6912 
6913 /**
6914  * i40e_init_pf_dcb - Initialize DCB configuration
6915  * @pf: PF being configured
6916  *
6917  * Query the current DCB configuration and cache it
6918  * in the hardware structure
6919  **/
6920 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6921 {
6922 	struct i40e_hw *hw = &pf->hw;
6923 	int err;
6924 
6925 	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
6926 	 * Also do not enable DCBx if FW LLDP agent is disabled
6927 	 */
6928 	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
6929 		dev_info(&pf->pdev->dev, "DCB is not supported.\n");
6930 		err = I40E_NOT_SUPPORTED;
6931 		goto out;
6932 	}
6933 	if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
6934 		dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
6935 		err = i40e_dcb_sw_default_config(pf);
6936 		if (err) {
6937 			dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
6938 			goto out;
6939 		}
6940 		dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
6941 		pf->dcbx_cap = DCB_CAP_DCBX_HOST |
6942 			       DCB_CAP_DCBX_VER_IEEE;
6943 		/* at init capable but disabled */
6944 		pf->flags |= I40E_FLAG_DCB_CAPABLE;
6945 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6946 		goto out;
6947 	}
6948 	err = i40e_init_dcb(hw, true);
6949 	if (!err) {
6950 		/* Device/Function is not DCBX capable */
6951 		if ((!hw->func_caps.dcb) ||
6952 		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6953 			dev_info(&pf->pdev->dev,
6954 				 "DCBX offload is not supported or is disabled for this PF.\n");
6955 		} else {
6956 			/* When status is not DISABLED, DCBX is managed in FW */
6957 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6958 				       DCB_CAP_DCBX_VER_IEEE;
6959 
6960 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
6961 			/* Enable DCB tagging only when more than one TC is in
6962 			 * use, or explicitly disable it if only one TC is used
6963 			 */
6964 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6965 				pf->flags |= I40E_FLAG_DCB_ENABLED;
6966 			else
6967 				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6968 			dev_dbg(&pf->pdev->dev,
6969 				"DCBX offload is supported for this PF.\n");
6970 		}
6971 	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6972 		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6973 		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6974 	} else {
6975 		dev_info(&pf->pdev->dev,
6976 			 "Query for DCB configuration failed, err %s aq_err %s\n",
6977 			 i40e_stat_str(&pf->hw, err),
6978 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6979 	}
6980 
6981 out:
6982 	return err;
6983 }
6984 #endif /* CONFIG_I40E_DCB */
6985 
6986 /**
6987  * i40e_print_link_message - print link up or down
6988  * @vsi: the VSI for which link needs a message
6989  * @isup: true if link is up, false otherwise
6990  */
6991 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6992 {
6993 	enum i40e_aq_link_speed new_speed;
6994 	struct i40e_pf *pf = vsi->back;
6995 	char *speed = "Unknown";
6996 	char *fc = "Unknown";
6997 	char *fec = "";
6998 	char *req_fec = "";
6999 	char *an = "";
7000 
7001 	if (isup)
7002 		new_speed = pf->hw.phy.link_info.link_speed;
7003 	else
7004 		new_speed = I40E_LINK_SPEED_UNKNOWN;
7005 
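	/* Nothing to report if neither the link state nor the speed changed
	 * since the last message.
	 */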
7006 	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7007 		return;
7008 	vsi->current_isup = isup;
7009 	vsi->current_speed = new_speed;
7010 	if (!isup) {
7011 		netdev_info(vsi->netdev, "NIC Link is Down\n");
7012 		return;
7013 	}
7014 
7015 	/* Warn the user if the link speed on an NPAR enabled partition is
7016 	 * less than 10Gbps
7017 	 */
7018 	if (pf->hw.func_caps.npar_enable &&
7019 	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7020 	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7021 		netdev_warn(vsi->netdev,
7022 			    "The partition detected link speed that is less than 10Gbps\n");
7023 
7024 	switch (pf->hw.phy.link_info.link_speed) {
7025 	case I40E_LINK_SPEED_40GB:
7026 		speed = "40 G";
7027 		break;
7028 	case I40E_LINK_SPEED_20GB:
7029 		speed = "20 G";
7030 		break;
7031 	case I40E_LINK_SPEED_25GB:
7032 		speed = "25 G";
7033 		break;
7034 	case I40E_LINK_SPEED_10GB:
7035 		speed = "10 G";
7036 		break;
7037 	case I40E_LINK_SPEED_5GB:
7038 		speed = "5 G";
7039 		break;
7040 	case I40E_LINK_SPEED_2_5GB:
7041 		speed = "2.5 G";
7042 		break;
7043 	case I40E_LINK_SPEED_1GB:
7044 		speed = "1000 M";
7045 		break;
7046 	case I40E_LINK_SPEED_100MB:
7047 		speed = "100 M";
7048 		break;
7049 	default:
7050 		break;
7051 	}
7052 
7053 	switch (pf->hw.fc.current_mode) {
7054 	case I40E_FC_FULL:
7055 		fc = "RX/TX";
7056 		break;
7057 	case I40E_FC_TX_PAUSE:
7058 		fc = "TX";
7059 		break;
7060 	case I40E_FC_RX_PAUSE:
7061 		fc = "RX";
7062 		break;
7063 	default:
7064 		fc = "None";
7065 		break;
7066 	}
7067 
7068 	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7069 		req_fec = "None";
7070 		fec = "None";
7071 		an = "False";
7072 
7073 		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7074 			an = "True";
7075 
7076 		if (pf->hw.phy.link_info.fec_info &
7077 		    I40E_AQ_CONFIG_FEC_KR_ENA)
7078 			fec = "CL74 FC-FEC/BASE-R";
7079 		else if (pf->hw.phy.link_info.fec_info &
7080 			 I40E_AQ_CONFIG_FEC_RS_ENA)
7081 			fec = "CL108 RS-FEC";
7082 
7083 		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
7084 		 * both RS and FC are requested
7085 		 */
7086 		if (vsi->back->hw.phy.link_info.req_fec_info &
7087 		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7088 			if (vsi->back->hw.phy.link_info.req_fec_info &
7089 			    I40E_AQ_REQUEST_FEC_RS)
7090 				req_fec = "CL108 RS-FEC";
7091 			else
7092 				req_fec = "CL74 FC-FEC/BASE-R";
7093 		}
7094 		netdev_info(vsi->netdev,
7095 			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7096 			    speed, req_fec, fec, an, fc);
7097 	} else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7098 		req_fec = "None";
7099 		fec = "None";
7100 		an = "False";
7101 
7102 		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7103 			an = "True";
7104 
7105 		if (pf->hw.phy.link_info.fec_info &
7106 		    I40E_AQ_CONFIG_FEC_KR_ENA)
7107 			fec = "CL74 FC-FEC/BASE-R";
7108 
7109 		if (pf->hw.phy.link_info.req_fec_info &
7110 		    I40E_AQ_REQUEST_FEC_KR)
7111 			req_fec = "CL74 FC-FEC/BASE-R";
7112 
7113 		netdev_info(vsi->netdev,
7114 			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7115 			    speed, req_fec, fec, an, fc);
7116 	} else {
7117 		netdev_info(vsi->netdev,
7118 			    "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7119 			    speed, fc);
7120 	}
7122 }
7123 
7124 /**
7125  * i40e_up_complete - Finish the last steps of bringing up a connection
7126  * @vsi: the VSI being configured
7127  **/
7128 static int i40e_up_complete(struct i40e_vsi *vsi)
7129 {
7130 	struct i40e_pf *pf = vsi->back;
7131 	int err;
7132 
7133 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7134 		i40e_vsi_configure_msix(vsi);
7135 	else
7136 		i40e_configure_msi_and_legacy(vsi);
7137 
7138 	/* start rings */
7139 	err = i40e_vsi_start_rings(vsi);
7140 	if (err)
7141 		return err;
7142 
7143 	clear_bit(__I40E_VSI_DOWN, vsi->state);
7144 	i40e_napi_enable_all(vsi);
7145 	i40e_vsi_enable_irq(vsi);
7146 
7147 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7148 	    (vsi->netdev)) {
7149 		i40e_print_link_message(vsi, true);
7150 		netif_tx_start_all_queues(vsi->netdev);
7151 		netif_carrier_on(vsi->netdev);
7152 	}
7153 
7154 	/* replay FDIR SB filters */
7155 	if (vsi->type == I40E_VSI_FDIR) {
7156 		/* reset fd counters */
7157 		pf->fd_add_err = 0;
7158 		pf->fd_atr_cnt = 0;
7159 		i40e_fdir_filter_restore(vsi);
7160 	}
7161 
7162 	/* On the next run of the service_task, notify any clients of the
7163 	 * newly opened netdev
7164 	 */
7165 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7166 	i40e_service_event_schedule(pf);
7167 
7168 	return 0;
7169 }
7170 
7171 /**
7172  * i40e_vsi_reinit_locked - Reset the VSI
7173  * @vsi: the VSI being configured
7174  *
7175  * Rebuild the ring structs after some configuration
7176  * has changed, e.g. MTU size.
7177  **/
7178 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7179 {
7180 	struct i40e_pf *pf = vsi->back;
7181 
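	/* Serialize with any other configuration change in progress: spin
	 * until we own the __I40E_CONFIG_BUSY bit, then do a full down/up
	 * cycle to rebuild the rings.
	 */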
7182 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7183 		usleep_range(1000, 2000);
7184 	i40e_down(vsi);
7185 
7186 	i40e_up(vsi);
7187 	clear_bit(__I40E_CONFIG_BUSY, pf->state);
7188 }
7189 
7190 /**
7191  * i40e_force_link_state - Force the link status
7192  * @pf: board private structure
7193  * @is_up: whether the link state should be forced up or down
7194  **/
7195 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7196 {
7197 	struct i40e_aq_get_phy_abilities_resp abilities;
7198 	struct i40e_aq_set_phy_config config = {0};
7199 	bool non_zero_phy_type = is_up;
7200 	struct i40e_hw *hw = &pf->hw;
7201 	i40e_status err;
7202 	u64 mask;
7203 	u8 speed;
7204 
7205 	/* The card might have been put in an unstable state by other drivers
7206 	 * and applications, which can cause incorrect speed values to be
7207 	 * set on startup. In order to clear the speed registers, we call
7208 	 * get_phy_capabilities twice, once to get the initial state of the
7209 	 * available speeds, and once to get the current PHY config.
7210 	 */
7211 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7212 					   NULL);
7213 	if (err) {
7214 		dev_err(&pf->pdev->dev,
7215 			"failed to get phy cap., ret =  %s last_status =  %s\n",
7216 			i40e_stat_str(hw, err),
7217 			i40e_aq_str(hw, hw->aq.asq_last_status));
7218 		return err;
7219 	}
7220 	speed = abilities.link_speed;
7221 
7222 	/* Get the current phy config */
7223 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7224 					   NULL);
7225 	if (err) {
7226 		dev_err(&pf->pdev->dev,
7227 			"failed to get phy cap., ret =  %s last_status =  %s\n",
7228 			i40e_stat_str(hw, err),
7229 			i40e_aq_str(hw, hw->aq.asq_last_status));
7230 		return err;
7231 	}
7232 
7233 	/* If the link needs to go up but was not forced down, and its
7234 	 * speed values are OK, there is no need for a link flap.
7235 	 * If non_zero_phy_type was set, we still need to force it up.
7236 	 */
7237 	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7238 		non_zero_phy_type = true;
7239 	else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7240 		return I40E_SUCCESS;
7241 
7242 	/* To force link we need to set bits for all supported PHY types,
7243 	 * but there are now more than 32, so we need to split the bitmap
7244 	 * across two fields.
7245 	 */
7246 	mask = I40E_PHY_TYPES_BITMASK;
7247 	config.phy_type =
7248 		non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7249 	config.phy_type_ext =
7250 		non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
7251 	/* Copy the old settings, except for phy_type */
7252 	config.abilities = abilities.abilities;
7253 	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7254 		if (is_up)
7255 			config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7256 		else
7257 			config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7258 	}
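	/* Prefer the currently configured link speed; fall back to the speed
	 * reported by the initial capabilities query if it reads back zero.
	 */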
7259 	if (abilities.link_speed != 0)
7260 		config.link_speed = abilities.link_speed;
7261 	else
7262 		config.link_speed = speed;
7263 	config.eee_capability = abilities.eee_capability;
7264 	config.eeer = abilities.eeer_val;
7265 	config.low_power_ctrl = abilities.d3_lpan;
7266 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7267 			    I40E_AQ_PHY_FEC_CONFIG_MASK;
7268 	err = i40e_aq_set_phy_config(hw, &config, NULL);
7269 
7270 	if (err) {
7271 		dev_err(&pf->pdev->dev,
7272 			"set phy config ret =  %s last_status =  %s\n",
7273 			i40e_stat_str(&pf->hw, err),
7274 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7275 		return err;
7276 	}
7277 
7278 	/* Update the link info */
7279 	err = i40e_update_link_info(hw);
7280 	if (err) {
7281 		/* Wait a little bit (on 40G cards it sometimes takes a really
7282 		 * long time for link to come back from the atomic reset)
7283 		 * and try once more
7284 		 */
7285 		msleep(1000);
7286 		i40e_update_link_info(hw);
7287 	}
7288 
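	/* Finally ask the FW to restart link/auto-negotiation with the
	 * requested up/down state so the new PHY config takes effect.
	 */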
7289 	i40e_aq_set_link_restart_an(hw, is_up, NULL);
7290 
7291 	return I40E_SUCCESS;
7292 }
7293 
7294 /**
7295  * i40e_up - Bring the connection back up after being down
7296  * @vsi: the VSI being configured
7297  **/
7298 int i40e_up(struct i40e_vsi *vsi)
7299 {
7300 	int err;
7301 
7302 	if (vsi->type == I40E_VSI_MAIN &&
7303 	    (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7304 	     vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7305 		i40e_force_link_state(vsi->back, true);
7306 
7307 	err = i40e_vsi_configure(vsi);
7308 	if (!err)
7309 		err = i40e_up_complete(vsi);
7310 
7311 	return err;
7312 }
7313 
7314 /**
7315  * i40e_down - Shutdown the connection processing
7316  * @vsi: the VSI being stopped
7317  **/
7318 void i40e_down(struct i40e_vsi *vsi)
7319 {
7320 	int i;
7321 
7322 	/* It is assumed that the caller of this function
7323 	 * sets the vsi->state __I40E_VSI_DOWN bit.
7324 	 */
7325 	if (vsi->netdev) {
7326 		netif_carrier_off(vsi->netdev);
7327 		netif_tx_disable(vsi->netdev);
7328 	}
7329 	i40e_vsi_disable_irq(vsi);
7330 	i40e_vsi_stop_rings(vsi);
7331 	if (vsi->type == I40E_VSI_MAIN &&
7332 	   (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7333 	    vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7334 		i40e_force_link_state(vsi->back, false);
7335 	i40e_napi_disable_all(vsi);
7336 
7337 	for (i = 0; i < vsi->num_queue_pairs; i++) {
7338 		i40e_clean_tx_ring(vsi->tx_rings[i]);
7339 		if (i40e_enabled_xdp_vsi(vsi)) {
7340 			/* Make sure that in-progress ndo_xdp_xmit and
7341 			 * ndo_xsk_wakeup calls are completed.
7342 			 */
7343 			synchronize_rcu();
7344 			i40e_clean_tx_ring(vsi->xdp_rings[i]);
7345 		}
7346 		i40e_clean_rx_ring(vsi->rx_rings[i]);
7347 	}
7349 }
7350 
7351 /**
7352  * i40e_validate_mqprio_qopt - validate queue mapping info
7353  * @vsi: the VSI being configured
7354  * @mqprio_qopt: queue parameters
7355  **/
7356 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7357 				     struct tc_mqprio_qopt_offload *mqprio_qopt)
7358 {
7359 	u64 sum_max_rate = 0;
7360 	u64 max_rate = 0;
7361 	int i;
7362 
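	/* TC0 must start at queue offset 0 and the requested TC count must
	 * be within the device limit before the per-TC parameters are
	 * checked.
	 */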
7363 	if (mqprio_qopt->qopt.offset[0] != 0 ||
7364 	    mqprio_qopt->qopt.num_tc < 1 ||
7365 	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7366 		return -EINVAL;
7367 	for (i = 0; ; i++) {
7368 		if (!mqprio_qopt->qopt.count[i])
7369 			return -EINVAL;
7370 		if (mqprio_qopt->min_rate[i]) {
7371 			dev_err(&vsi->back->pdev->dev,
7372 				"Invalid min tx rate (greater than 0) specified\n");
7373 			return -EINVAL;
7374 		}
7375 		max_rate = mqprio_qopt->max_rate[i];
7376 		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7377 		sum_max_rate += max_rate;
7378 
7379 		if (i >= mqprio_qopt->qopt.num_tc - 1)
7380 			break;
7381 		if (mqprio_qopt->qopt.offset[i + 1] !=
7382 		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7383 			return -EINVAL;
7384 	}
7385 	if (vsi->num_queue_pairs <
7386 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7387 		dev_err(&vsi->back->pdev->dev,
7388 			"Failed to create traffic channel, insufficient number of queues.\n");
7389 		return -EINVAL;
7390 	}
7391 	if (sum_max_rate > i40e_get_link_speed(vsi)) {
7392 		dev_err(&vsi->back->pdev->dev,
7393 			"Invalid max tx rate specified\n");
7394 		return -EINVAL;
7395 	}
7396 	return 0;
7397 }
7398 
7399 /**
7400  * i40e_vsi_set_default_tc_config - set default values for tc configuration
7401  * @vsi: the VSI being configured
7402  **/
7403 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7404 {
7405 	u16 qcount;
7406 	int i;
7407 
7408 	/* Only TC0 is enabled */
7409 	vsi->tc_config.numtc = 1;
7410 	vsi->tc_config.enabled_tc = 1;
7411 	qcount = min_t(int, vsi->alloc_queue_pairs,
7412 		       i40e_pf_get_max_q_per_tc(vsi->back));
7413 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7414 		/* For TCs that are not enabled, set the offset to the
7415 		 * default queue and allocate one queue each.
7416 		 */
7417 		vsi->tc_config.tc_info[i].qoffset = 0;
7418 		if (i == 0)
7419 			vsi->tc_config.tc_info[i].qcount = qcount;
7420 		else
7421 			vsi->tc_config.tc_info[i].qcount = 1;
7422 		vsi->tc_config.tc_info[i].netdev_tc = 0;
7423 	}
7424 }
7425 
7426 /**
7427  * i40e_del_macvlan_filter - delete a MAC filter from a channel VSI
7428  * @hw: pointer to the HW structure
7429  * @seid: seid of the channel VSI
7430  * @macaddr: the mac address to apply as a filter
7431  * @aq_err: store the admin Q error
7432  *
7433  * This function deletes a mac filter on the channel VSI which serves as the
7434  * macvlan. Returns 0 on success.
7435  **/
7436 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7437 					   const u8 *macaddr, int *aq_err)
7438 {
7439 	struct i40e_aqc_remove_macvlan_element_data element;
7440 	i40e_status status;
7441 
7442 	memset(&element, 0, sizeof(element));
7443 	ether_addr_copy(element.mac_addr, macaddr);
7444 	element.vlan_tag = 0;
7445 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7446 	status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7447 	*aq_err = hw->aq.asq_last_status;
7448 
7449 	return status;
7450 }
7451 
7452 /**
7453  * i40e_add_macvlan_filter - add a MAC filter on a channel VSI
7454  * @hw: pointer to the HW structure
7455  * @seid: seid of the channel VSI
7456  * @macaddr: the mac address to apply as a filter
7457  * @aq_err: store the admin Q error
7458  *
7459  * This function adds a mac filter on the channel VSI which serves as the
7460  * macvlan. Returns 0 on success.
7461  **/
7462 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7463 					   const u8 *macaddr, int *aq_err)
7464 {
7465 	struct i40e_aqc_add_macvlan_element_data element;
7466 	i40e_status status;
7467 	u16 cmd_flags = 0;
7468 
7469 	ether_addr_copy(element.mac_addr, macaddr);
7470 	element.vlan_tag = 0;
7471 	element.queue_number = 0;
7472 	element.match_method = I40E_AQC_MM_ERR_NO_RES;
7473 	cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7474 	element.flags = cpu_to_le16(cmd_flags);
7475 	status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7476 	*aq_err = hw->aq.asq_last_status;
7477 
7478 	return status;
7479 }
7480 
7481 /**
7482  * i40e_reset_ch_rings - Reset the queue contexts in a channel
7483  * @vsi: the VSI we want to access
7484  * @ch: the channel we want to access
7485  */
7486 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7487 {
7488 	struct i40e_ring *tx_ring, *rx_ring;
7489 	u16 pf_q;
7490 	int i;
7491 
7492 	for (i = 0; i < ch->num_queue_pairs; i++) {
7493 		pf_q = ch->base_queue + i;
7494 		tx_ring = vsi->tx_rings[pf_q];
7495 		tx_ring->ch = NULL;
7496 		rx_ring = vsi->rx_rings[pf_q];
7497 		rx_ring->ch = NULL;
7498 	}
7499 }
7500 
7501 /**
7502  * i40e_free_macvlan_channels - free the macvlan channel VSIs
7503  * @vsi: the VSI we want to access
7504  *
7505  * This function frees the Qs of the channel VSI from
7506  * the stack and also deletes the channel VSIs which
7507  * serve as macvlans.
7508  */
7509 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7510 {
7511 	struct i40e_channel *ch, *ch_tmp;
7512 	int ret;
7513 
7514 	if (list_empty(&vsi->macvlan_list))
7515 		return;
7516 
7517 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7518 		struct i40e_vsi *parent_vsi;
7519 
7520 		if (i40e_is_channel_macvlan(ch)) {
7521 			i40e_reset_ch_rings(vsi, ch);
7522 			clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7523 			netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7524 			netdev_set_sb_channel(ch->fwd->netdev, 0);
7525 			kfree(ch->fwd);
7526 			ch->fwd = NULL;
7527 		}
7528 
7529 		list_del(&ch->list);
7530 		parent_vsi = ch->parent_vsi;
7531 		if (!parent_vsi || !ch->initialized) {
7532 			kfree(ch);
7533 			continue;
7534 		}
7535 
7536 		/* remove the VSI */
7537 		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7538 					     NULL);
7539 		if (ret)
7540 			dev_err(&vsi->back->pdev->dev,
7541 				"unable to remove channel (%d) for parent VSI(%d)\n",
7542 				ch->seid, parent_vsi->seid);
7543 		kfree(ch);
7544 	}
7545 	vsi->macvlan_cnt = 0;
7546 }
7547 
7548 /**
7549  * i40e_fwd_ring_up - bring the macvlan device up
7550  * @vsi: the VSI we want to access
7551  * @vdev: macvlan netdevice
7552  * @fwd: the private fwd structure
7553  */
7554 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7555 			    struct i40e_fwd_adapter *fwd)
7556 {
7557 	struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7558 	int ret = 0, num_tc = 1,  i, aq_err;
7559 	struct i40e_pf *pf = vsi->back;
7560 	struct i40e_hw *hw = &pf->hw;
7561 
7562 	/* Go through the list and find an available channel */
7563 	list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7564 		if (!i40e_is_channel_macvlan(iter)) {
7565 			iter->fwd = fwd;
7566 			/* record configuration for macvlan interface in vdev */
7567 			for (i = 0; i < num_tc; i++)
7568 				netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7569 							     i,
7570 							     iter->num_queue_pairs,
7571 							     iter->base_queue);
7572 			for (i = 0; i < iter->num_queue_pairs; i++) {
7573 				struct i40e_ring *tx_ring, *rx_ring;
7574 				u16 pf_q;
7575 
7576 				pf_q = iter->base_queue + i;
7577 
7578 				/* Get to TX ring ptr */
7579 				tx_ring = vsi->tx_rings[pf_q];
7580 				tx_ring->ch = iter;
7581 
7582 				/* Get the RX ring ptr */
7583 				rx_ring = vsi->rx_rings[pf_q];
7584 				rx_ring->ch = iter;
7585 			}
7586 			ch = iter;
7587 			break;
7588 		}
7589 	}
7590 
7591 	if (!ch)
7592 		return -EINVAL;
7593 
7594 	/* Guarantee all rings are updated before we update the
7595 	 * MAC address filter.
7596 	 */
7597 	wmb();
7598 
7599 	/* Add a mac filter */
7600 	ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7601 	if (ret) {
7602 		/* if we cannot add the MAC rule then disable the offload */
7603 		macvlan_release_l2fw_offload(vdev);
7604 		for (i = 0; i < ch->num_queue_pairs; i++) {
7605 			struct i40e_ring *rx_ring;
7606 			u16 pf_q;
7607 
7608 			pf_q = ch->base_queue + i;
7609 			rx_ring = vsi->rx_rings[pf_q];
7610 			rx_ring->netdev = NULL;
7611 		}
7612 		dev_info(&pf->pdev->dev,
7613 			 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7614 			  i40e_stat_str(hw, ret),
7615 			  i40e_aq_str(hw, aq_err));
7616 		netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7617 	}
7618 
7619 	return ret;
7620 }
7621 
7622 /**
7623  * i40e_setup_macvlans - create the channels which will be macvlans
7624  * @vsi: the VSI we want to access
7625  * @macvlan_cnt: no. of macvlans to be setup
7626  * @qcnt: no. of Qs per macvlan
7627  * @vdev: macvlan netdevice
7628  */
7629 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7630 			       struct net_device *vdev)
7631 {
7632 	struct i40e_pf *pf = vsi->back;
7633 	struct i40e_hw *hw = &pf->hw;
7634 	struct i40e_vsi_context ctxt;
7635 	u16 sections, qmap, num_qps;
7636 	struct i40e_channel *ch;
7637 	int i, pow, ret = 0;
7638 	u8 offset = 0;
7639 
7640 	if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7641 		return -EINVAL;
7642 
7643 	num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7644 
7645 	/* find the next higher power-of-2 of num queue pairs */
7646 	pow = fls(roundup_pow_of_two(num_qps) - 1);
7647 
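	/* TC0 queue map: queue offset in the low bits and log2 of the queue
	 * count in the QUE_NUMBER field, consumed via tc_mapping[0] below.
	 */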
7648 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7649 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7650 
7651 	/* Setup context bits for the main VSI */
7652 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7653 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7654 	memset(&ctxt, 0, sizeof(ctxt));
7655 	ctxt.seid = vsi->seid;
7656 	ctxt.pf_num = vsi->back->hw.pf_id;
7657 	ctxt.vf_num = 0;
7658 	ctxt.uplink_seid = vsi->uplink_seid;
7659 	ctxt.info = vsi->info;
7660 	ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7661 	ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7662 	ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7663 	ctxt.info.valid_sections |= cpu_to_le16(sections);
7664 
7665 	/* Reconfigure RSS for main VSI with new max queue count */
7666 	vsi->rss_size = max_t(u16, num_qps, qcnt);
7667 	ret = i40e_vsi_config_rss(vsi);
7668 	if (ret) {
7669 		dev_info(&pf->pdev->dev,
7670 			 "Failed to reconfig RSS for num_queues (%u)\n",
7671 			 vsi->rss_size);
7672 		return ret;
7673 	}
7674 	vsi->reconfig_rss = true;
7675 	dev_dbg(&vsi->back->pdev->dev,
7676 		"Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7677 	vsi->next_base_queue = num_qps;
7678 	vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7679 
7680 	/* Update the VSI after updating the VSI queue-mapping
7681 	 * information
7682 	 */
7683 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7684 	if (ret) {
7685 		dev_info(&pf->pdev->dev,
7686 			 "Update vsi tc config failed, err %s aq_err %s\n",
7687 			 i40e_stat_str(hw, ret),
7688 			 i40e_aq_str(hw, hw->aq.asq_last_status));
7689 		return ret;
7690 	}
7691 	/* update the local VSI info with updated queue map */
7692 	i40e_vsi_update_queue_map(vsi, &ctxt);
7693 	vsi->info.valid_sections = 0;
7694 
7695 	/* Create channels for macvlans */
7696 	INIT_LIST_HEAD(&vsi->macvlan_list);
7697 	for (i = 0; i < macvlan_cnt; i++) {
7698 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7699 		if (!ch) {
7700 			ret = -ENOMEM;
7701 			goto err_free;
7702 		}
7703 		INIT_LIST_HEAD(&ch->list);
7704 		ch->num_queue_pairs = qcnt;
7705 		if (!i40e_setup_channel(pf, vsi, ch)) {
7706 			ret = -EINVAL;
7707 			kfree(ch);
7708 			goto err_free;
7709 		}
7710 		ch->parent_vsi = vsi;
7711 		vsi->cnt_q_avail -= ch->num_queue_pairs;
7712 		vsi->macvlan_cnt++;
7713 		list_add_tail(&ch->list, &vsi->macvlan_list);
7714 	}
7715 
7716 	return ret;
7717 
7718 err_free:
7719 	dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7720 	i40e_free_macvlan_channels(vsi);
7721 
7722 	return ret;
7723 }
7724 
7725 /**
7726  * i40e_fwd_add - configure macvlans
7727  * @netdev: net device to configure
7728  * @vdev: macvlan netdevice
7729  **/
7730 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7731 {
7732 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7733 	u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7734 	struct i40e_vsi *vsi = np->vsi;
7735 	struct i40e_pf *pf = vsi->back;
7736 	struct i40e_fwd_adapter *fwd;
7737 	int avail_macvlan, ret;
7738 
7739 	if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7740 		netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7741 		return ERR_PTR(-EINVAL);
7742 	}
7743 	if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7744 		netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7745 		return ERR_PTR(-EINVAL);
7746 	}
7747 	if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7748 		netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7749 		return ERR_PTR(-EINVAL);
7750 	}
7751 
7752 	/* The macvlan device has to be a single Q device so that the
7753 	 * tc_to_txq field can be reused to pick the tx queue.
7754 	 */
7755 	if (netif_is_multiqueue(vdev))
7756 		return ERR_PTR(-ERANGE);
7757 
7758 	if (!vsi->macvlan_cnt) {
7759 		/* reserve bit 0 for the pf device */
7760 		set_bit(0, vsi->fwd_bitmask);
7761 
7762 		/* Try to reserve as many queues as possible for macvlans. First
7763 		 * reserve three-quarters of the max vectors, then half, then a
7764 		 * quarter, and calculate the Qs per macvlan as you go
7765 		 */
7766 		vectors = pf->num_lan_msix;
7767 		if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7768 			/* allocate 4 Qs per macvlan and 32 Qs to the PF */
7769 			q_per_macvlan = 4;
7770 			macvlan_cnt = (vectors - 32) / 4;
7771 		} else if (vectors <= 64 && vectors > 32) {
7772 			/* allocate 2 Qs per macvlan and 16 Qs to the PF */
7773 			q_per_macvlan = 2;
7774 			macvlan_cnt = (vectors - 16) / 2;
7775 		} else if (vectors <= 32 && vectors > 16) {
7776 			/* allocate 1 Q per macvlan and 16 Qs to the PF */
7777 			q_per_macvlan = 1;
7778 			macvlan_cnt = vectors - 16;
7779 		} else if (vectors <= 16 && vectors > 8) {
7780 			/* allocate 1 Q per macvlan and 8 Qs to the PF */
7781 			q_per_macvlan = 1;
7782 			macvlan_cnt = vectors - 8;
7783 		} else {
7784 			/* allocate 1 Q per macvlan and 1 Q to the PF */
7785 			q_per_macvlan = 1;
7786 			macvlan_cnt = vectors - 1;
7787 		}
7788 
7789 		if (macvlan_cnt == 0)
7790 			return ERR_PTR(-EBUSY);
7791 
7792 		/* Quiesce VSI queues */
7793 		i40e_quiesce_vsi(vsi);
7794 
7795 		/* sets up the macvlans but does not "enable" them */
7796 		ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7797 					  vdev);
7798 		if (ret)
7799 			return ERR_PTR(ret);
7800 
7801 		/* Unquiesce VSI */
7802 		i40e_unquiesce_vsi(vsi);
7803 	}
7804 	avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7805 					    vsi->macvlan_cnt);
7806 	if (avail_macvlan >= I40E_MAX_MACVLANS)
7807 		return ERR_PTR(-EBUSY);
7808 
7809 	/* create the fwd struct */
7810 	fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7811 	if (!fwd)
7812 		return ERR_PTR(-ENOMEM);
7813 
7814 	set_bit(avail_macvlan, vsi->fwd_bitmask);
7815 	fwd->bit_no = avail_macvlan;
7816 	netdev_set_sb_channel(vdev, avail_macvlan);
7817 	fwd->netdev = vdev;
7818 
7819 	if (!netif_running(netdev))
7820 		return fwd;
7821 
7822 	/* Set fwd ring up */
7823 	ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7824 	if (ret) {
7825 		/* unbind the queues and drop the subordinate channel config */
7826 		netdev_unbind_sb_channel(netdev, vdev);
7827 		netdev_set_sb_channel(vdev, 0);
7828 
7829 		kfree(fwd);
7830 		return ERR_PTR(-EINVAL);
7831 	}
7832 
7833 	return fwd;
7834 }
7835 
7836 /**
7837  * i40e_del_all_macvlans - Delete all the mac filters on the channels
7838  * @vsi: the VSI we want to access
7839  */
7840 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7841 {
7842 	struct i40e_channel *ch, *ch_tmp;
7843 	struct i40e_pf *pf = vsi->back;
7844 	struct i40e_hw *hw = &pf->hw;
7845 	int aq_err, ret = 0;
7846 
7847 	if (list_empty(&vsi->macvlan_list))
7848 		return;
7849 
7850 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7851 		if (i40e_is_channel_macvlan(ch)) {
7852 			ret = i40e_del_macvlan_filter(hw, ch->seid,
7853 						      i40e_channel_mac(ch),
7854 						      &aq_err);
7855 			if (!ret) {
7856 				/* Reset queue contexts */
7857 				i40e_reset_ch_rings(vsi, ch);
7858 				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7859 				netdev_unbind_sb_channel(vsi->netdev,
7860 							 ch->fwd->netdev);
7861 				netdev_set_sb_channel(ch->fwd->netdev, 0);
7862 				kfree(ch->fwd);
7863 				ch->fwd = NULL;
7864 			}
7865 		}
7866 	}
7867 }
7868 
7869 /**
7870  * i40e_fwd_del - delete macvlan interfaces
7871  * @netdev: net device to configure
7872  * @vdev: macvlan netdevice
7873  */
7874 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7875 {
7876 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7877 	struct i40e_fwd_adapter *fwd = vdev;
7878 	struct i40e_channel *ch, *ch_tmp;
7879 	struct i40e_vsi *vsi = np->vsi;
7880 	struct i40e_pf *pf = vsi->back;
7881 	struct i40e_hw *hw = &pf->hw;
7882 	int aq_err, ret = 0;
7883 
7884 	/* Find the channel associated with the macvlan and del mac filter */
7885 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7886 		if (i40e_is_channel_macvlan(ch) &&
7887 		    ether_addr_equal(i40e_channel_mac(ch),
7888 				     fwd->netdev->dev_addr)) {
7889 			ret = i40e_del_macvlan_filter(hw, ch->seid,
7890 						      i40e_channel_mac(ch),
7891 						      &aq_err);
7892 			if (!ret) {
7893 				/* Reset queue contexts */
7894 				i40e_reset_ch_rings(vsi, ch);
7895 				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7896 				netdev_unbind_sb_channel(netdev, fwd->netdev);
7897 				netdev_set_sb_channel(fwd->netdev, 0);
7898 				kfree(ch->fwd);
7899 				ch->fwd = NULL;
7900 			} else {
7901 				dev_info(&pf->pdev->dev,
7902 					 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7903 					  i40e_stat_str(hw, ret),
7904 					  i40e_aq_str(hw, aq_err));
7905 			}
7906 			break;
7907 		}
7908 	}
7909 }
7910 
7911 /**
7912  * i40e_setup_tc - configure multiple traffic classes
7913  * @netdev: net device to configure
7914  * @type_data: tc offload data
7915  **/
7916 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7917 {
7918 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7919 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7920 	struct i40e_vsi *vsi = np->vsi;
7921 	struct i40e_pf *pf = vsi->back;
7922 	u8 enabled_tc = 0, num_tc, hw;
7923 	bool need_reset = false;
7924 	int old_queue_pairs;
7925 	int ret = -EINVAL;
7926 	u16 mode;
7927 	int i;
7928 
7929 	old_queue_pairs = vsi->num_queue_pairs;
7930 	num_tc = mqprio_qopt->qopt.num_tc;
7931 	hw = mqprio_qopt->qopt.hw;
7932 	mode = mqprio_qopt->mode;
7933 	if (!hw) {
7934 		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7935 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7936 		goto config_tc;
7937 	}
7938 
7939 	/* Check if MFP enabled */
7940 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7941 		netdev_info(netdev,
7942 			    "Configuring TC not supported in MFP mode\n");
7943 		return ret;
7944 	}
7945 	switch (mode) {
7946 	case TC_MQPRIO_MODE_DCB:
7947 		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7948 
7949 		/* Check if DCB enabled to continue */
7950 		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7951 			netdev_info(netdev,
7952 				    "DCB is not enabled for adapter\n");
7953 			return ret;
7954 		}
7955 
7956 		/* Check whether tc count is within enabled limit */
7957 		if (num_tc > i40e_pf_get_num_tc(pf)) {
7958 			netdev_info(netdev,
7959 				    "TC count greater than enabled on link for adapter\n");
7960 			return ret;
7961 		}
7962 		break;
7963 	case TC_MQPRIO_MODE_CHANNEL:
7964 		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7965 			netdev_info(netdev,
7966 				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7967 			return ret;
7968 		}
7969 		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7970 			return ret;
7971 		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7972 		if (ret)
7973 			return ret;
7974 		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7975 		       sizeof(*mqprio_qopt));
7976 		pf->flags |= I40E_FLAG_TC_MQPRIO;
7977 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7978 		break;
7979 	default:
7980 		return -EINVAL;
7981 	}
7982 
7983 config_tc:
7984 	/* Generate TC map for number of tc requested */
7985 	for (i = 0; i < num_tc; i++)
7986 		enabled_tc |= BIT(i);
7987 
7988 	/* Requesting same TC configuration as already enabled */
7989 	if (enabled_tc == vsi->tc_config.enabled_tc &&
7990 	    mode != TC_MQPRIO_MODE_CHANNEL)
7991 		return 0;
7992 
7993 	/* Quiesce VSI queues */
7994 	i40e_quiesce_vsi(vsi);
7995 
7996 	if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7997 		i40e_remove_queue_channels(vsi);
7998 
7999 	/* Configure VSI for enabled TCs */
8000 	ret = i40e_vsi_config_tc(vsi, enabled_tc);
8001 	if (ret) {
8002 		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
8003 			    vsi->seid);
8004 		need_reset = true;
8005 		goto exit;
8006 	} else if (enabled_tc &&
8007 		   (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8008 		netdev_info(netdev,
8009 			    "Failed to create channel. Override queues (%u) not power of 2\n",
8010 			    vsi->tc_config.tc_info[0].qcount);
8011 		ret = -EINVAL;
8012 		need_reset = true;
8013 		goto exit;
8014 	}
8015 
8016 	dev_info(&vsi->back->pdev->dev,
8017 		 "Setup channel (id:%u) utilizing num_queues %d\n",
8018 		 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8019 
8020 	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
8021 		if (vsi->mqprio_qopt.max_rate[0]) {
8022 			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8023 
8024 			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
8025 			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8026 			if (!ret) {
8027 				u64 credits = max_tx_rate;
8028 
8029 				do_div(credits, I40E_BW_CREDIT_DIVISOR);
8030 				dev_dbg(&vsi->back->pdev->dev,
8031 					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8032 					max_tx_rate,
8033 					credits,
8034 					vsi->seid);
8035 			} else {
8036 				need_reset = true;
8037 				goto exit;
8038 			}
8039 		}
8040 		ret = i40e_configure_queue_channels(vsi);
8041 		if (ret) {
8042 			vsi->num_queue_pairs = old_queue_pairs;
8043 			netdev_info(netdev,
8044 				    "Failed configuring queue channels\n");
8045 			need_reset = true;
8046 			goto exit;
8047 		}
8048 	}
8049 
8050 exit:
8051 	/* Reset the configuration data to defaults, only TC0 is enabled */
8052 	if (need_reset) {
8053 		i40e_vsi_set_default_tc_config(vsi);
8054 		need_reset = false;
8055 	}
8056 
8057 	/* Unquiesce VSI */
8058 	i40e_unquiesce_vsi(vsi);
8059 	return ret;
8060 }
8061 
8062 /**
8063  * i40e_set_cld_element - sets cloud filter element data
8064  * @filter: cloud filter rule
8065  * @cld: ptr to cloud filter element data
8066  *
8067  * This is a helper function to copy data into the cloud filter element
8068  **/
8069 static inline void
8070 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8071 		     struct i40e_aqc_cloud_filters_element_data *cld)
8072 {
8073 	u32 ipa;
8074 	int i;
8075 
8076 	memset(cld, 0, sizeof(*cld));
8077 	ether_addr_copy(cld->outer_mac, filter->dst_mac);
8078 	ether_addr_copy(cld->inner_mac, filter->src_mac);
8079 
8080 	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8081 		return;
8082 
8083 	if (filter->n_proto == ETH_P_IPV6) {
8084 #define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
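		/* The AQ buffer expects the IPv6 address as little-endian
		 * 32-bit words with the word order reversed relative to the
		 * big-endian in-memory representation.
		 */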
8085 		for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8086 			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8087 
8088 			*(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8089 		}
8090 	} else {
8091 		ipa = be32_to_cpu(filter->dst_ipv4);
8092 
8093 		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8094 	}
8095 
8096 	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
8097 
8098 	/* tenant_id is not supported by the FW yet; once support is enabled,
8099 	 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
8100 	 */
8101 	if (filter->tenant_id)
8102 		return;
8103 }
8104 
8105 /**
8106  * i40e_add_del_cloud_filter - Add/del cloud filter
8107  * @vsi: pointer to VSI
8108  * @filter: cloud filter rule
8109  * @add: if true, add, if false, delete
8110  *
8111  * Add or delete a cloud filter for a specific flow spec.
8112  * Returns 0 if the filter was successfully added.
8113  **/
8114 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8115 			      struct i40e_cloud_filter *filter, bool add)
8116 {
8117 	struct i40e_aqc_cloud_filters_element_data cld_filter;
8118 	struct i40e_pf *pf = vsi->back;
8119 	int ret;
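	/* Map the driver's cloud filter flag combinations onto the
	 * corresponding admin queue ADD_CLOUD_FILTER flag encodings.
	 */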
8120 	static const u16 flag_table[128] = {
8121 		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
8122 			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8123 		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
8124 			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8125 		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
8126 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8127 		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8128 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8129 		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8130 			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8131 		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8132 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8133 		[I40E_CLOUD_FILTER_FLAGS_IIP] =
8134 			I40E_AQC_ADD_CLOUD_FILTER_IIP,
8135 	};
8136 
8137 	if (filter->flags >= ARRAY_SIZE(flag_table))
8138 		return I40E_ERR_CONFIG;
8139 
8140 	memset(&cld_filter, 0, sizeof(cld_filter));
8141 
8142 	/* copy element needed to add cloud filter from filter */
8143 	i40e_set_cld_element(filter, &cld_filter);
8144 
8145 	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8146 		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8147 					     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8148 
8149 	if (filter->n_proto == ETH_P_IPV6)
8150 		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8151 						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8152 	else
8153 		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8154 						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8155 
8156 	if (add)
8157 		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8158 						&cld_filter, 1);
8159 	else
8160 		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8161 						&cld_filter, 1);
8162 	if (ret)
8163 		dev_dbg(&pf->pdev->dev,
8164 			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8165 			add ? "add" : "delete", filter->dst_port, ret,
8166 			pf->hw.aq.asq_last_status);
8167 	else
8168 		dev_info(&pf->pdev->dev,
8169 			 "%s cloud filter for VSI: %d\n",
8170 			 add ? "Added" : "Deleted", filter->seid);
8171 	return ret;
8172 }
8173 
8174 /**
8175  * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8176  * @vsi: pointer to VSI
8177  * @filter: cloud filter rule
8178  * @add: if true, add, if false, delete
8179  *
8180  * Add or delete a cloud filter for a specific flow spec using big buffer.
8181  * Returns 0 if the filter was successfully added.
8182  **/
8183 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8184 				      struct i40e_cloud_filter *filter,
8185 				      bool add)
8186 {
8187 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
8188 	struct i40e_pf *pf = vsi->back;
8189 	int ret;
8190 
8191 	/* Both (src/dst) valid mac_addr are not supported */
8192 	if ((is_valid_ether_addr(filter->dst_mac) &&
8193 	     is_valid_ether_addr(filter->src_mac)) ||
8194 	    (is_multicast_ether_addr(filter->dst_mac) &&
8195 	     is_multicast_ether_addr(filter->src_mac)))
8196 		return -EOPNOTSUPP;
8197 
8198 	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8199 	 * ports are not supported via big buffer now.
8200 	 */
8201 	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8202 		return -EOPNOTSUPP;
8203 
8204 	/* adding filter using src_port/src_ip is not supported at this stage */
8205 	if (filter->src_port ||
8206 	    (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8207 	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
8208 		return -EOPNOTSUPP;
8209 
8210 	memset(&cld_filter, 0, sizeof(cld_filter));
8211 
8212 	/* copy element needed to add cloud filter from filter */
8213 	i40e_set_cld_element(filter, &cld_filter.element);
8214 
8215 	if (is_valid_ether_addr(filter->dst_mac) ||
8216 	    is_valid_ether_addr(filter->src_mac) ||
8217 	    is_multicast_ether_addr(filter->dst_mac) ||
8218 	    is_multicast_ether_addr(filter->src_mac)) {
8219 		/* MAC + IP : unsupported mode */
8220 		if (filter->dst_ipv4)
8221 			return -EOPNOTSUPP;
8222 
8223 		/* We already validated that the L4 port is non-zero before
8224 		 * getting here, so start with the MAC + port "flags" value
8225 		 * and upgrade it if a VLAN is also present.
8226 		 */
8227 		cld_filter.element.flags =
8228 			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8229 
8230 		if (filter->vlan_id) {
8231 			cld_filter.element.flags =
8232 			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8233 		}
8234 
8235 	} else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8236 		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8237 		cld_filter.element.flags =
8238 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8239 		if (filter->n_proto == ETH_P_IPV6)
8240 			cld_filter.element.flags |=
8241 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8242 		else
8243 			cld_filter.element.flags |=
8244 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8245 	} else {
8246 		dev_err(&pf->pdev->dev,
8247 			"either mac or ip has to be valid for cloud filter\n");
8248 		return -EINVAL;
8249 	}
8250 
8251 	/* Now copy L4 port in Byte 6..7 in general fields */
8252 	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8253 						be16_to_cpu(filter->dst_port);
8254 
8255 	if (add) {
8256 		/* Validate current device switch mode, change if necessary */
8257 		ret = i40e_validate_and_set_switch_mode(vsi);
8258 		if (ret) {
8259 			dev_err(&pf->pdev->dev,
8260 				"failed to set switch mode, ret %d\n",
8261 				ret);
8262 			return ret;
8263 		}
8264 
8265 		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8266 						   &cld_filter, 1);
8267 	} else {
8268 		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8269 						   &cld_filter, 1);
8270 	}
8271 
8272 	if (ret)
8273 		dev_dbg(&pf->pdev->dev,
8274 			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8275 			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8276 	else
8277 		dev_info(&pf->pdev->dev,
8278 			 "%s cloud filter for VSI: %d, L4 port: %d\n",
8279 			 add ? "add" : "delete", filter->seid,
8280 			 ntohs(filter->dst_port));
8281 	return ret;
8282 }
8283 
8284 /**
8285  * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8286  * @vsi: Pointer to VSI
8287  * @f: Pointer to struct flow_cls_offload
8288  * @filter: Pointer to cloud filter structure
8289  *
8290  **/
8291 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8292 				 struct flow_cls_offload *f,
8293 				 struct i40e_cloud_filter *filter)
8294 {
8295 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8296 	struct flow_dissector *dissector = rule->match.dissector;
8297 	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8298 	struct i40e_pf *pf = vsi->back;
8299 	u8 field_flags = 0;
8300 
8301 	if (dissector->used_keys &
8302 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8303 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
8304 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8305 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
8306 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8307 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8308 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
8309 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8310 		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8311 			dissector->used_keys);
8312 		return -EOPNOTSUPP;
8313 	}
8314 
8315 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8316 		struct flow_match_enc_keyid match;
8317 
8318 		flow_rule_match_enc_keyid(rule, &match);
8319 		if (match.mask->keyid != 0)
8320 			field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8321 
8322 		filter->tenant_id = be32_to_cpu(match.key->keyid);
8323 	}
8324 
8325 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8326 		struct flow_match_basic match;
8327 
8328 		flow_rule_match_basic(rule, &match);
8329 		n_proto_key = ntohs(match.key->n_proto);
8330 		n_proto_mask = ntohs(match.mask->n_proto);
8331 
8332 		if (n_proto_key == ETH_P_ALL) {
8333 			n_proto_key = 0;
8334 			n_proto_mask = 0;
8335 		}
8336 		filter->n_proto = n_proto_key & n_proto_mask;
8337 		filter->ip_proto = match.key->ip_proto;
8338 	}
8339 
8340 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8341 		struct flow_match_eth_addrs match;
8342 
8343 		flow_rule_match_eth_addrs(rule, &match);
8344 
8345 		/* use is_broadcast and is_zero to check for an all-ones or all-zeros mask */
8346 		if (!is_zero_ether_addr(match.mask->dst)) {
8347 			if (is_broadcast_ether_addr(match.mask->dst)) {
8348 				field_flags |= I40E_CLOUD_FIELD_OMAC;
8349 			} else {
8350 				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8351 					match.mask->dst);
8352 				return I40E_ERR_CONFIG;
8353 			}
8354 		}
8355 
8356 		if (!is_zero_ether_addr(match.mask->src)) {
8357 			if (is_broadcast_ether_addr(match.mask->src)) {
8358 				field_flags |= I40E_CLOUD_FIELD_IMAC;
8359 			} else {
8360 				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8361 					match.mask->src);
8362 				return I40E_ERR_CONFIG;
8363 			}
8364 		}
8365 		ether_addr_copy(filter->dst_mac, match.key->dst);
8366 		ether_addr_copy(filter->src_mac, match.key->src);
8367 	}
8368 
8369 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8370 		struct flow_match_vlan match;
8371 
8372 		flow_rule_match_vlan(rule, &match);
8373 		if (match.mask->vlan_id) {
8374 			if (match.mask->vlan_id == VLAN_VID_MASK) {
8375 				field_flags |= I40E_CLOUD_FIELD_IVLAN;
8376 
8377 			} else {
8378 				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8379 					match.mask->vlan_id);
8380 				return I40E_ERR_CONFIG;
8381 			}
8382 		}
8383 
8384 		filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8385 	}
8386 
8387 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8388 		struct flow_match_control match;
8389 
8390 		flow_rule_match_control(rule, &match);
8391 		addr_type = match.key->addr_type;
8392 	}
8393 
8394 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8395 		struct flow_match_ipv4_addrs match;
8396 
8397 		flow_rule_match_ipv4_addrs(rule, &match);
8398 		if (match.mask->dst) {
8399 			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8400 				field_flags |= I40E_CLOUD_FIELD_IIP;
8401 			} else {
8402 				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8403 					&match.mask->dst);
8404 				return I40E_ERR_CONFIG;
8405 			}
8406 		}
8407 
8408 		if (match.mask->src) {
8409 			if (match.mask->src == cpu_to_be32(0xffffffff)) {
8410 				field_flags |= I40E_CLOUD_FIELD_IIP;
8411 			} else {
8412 				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8413 					&match.mask->src);
8414 				return I40E_ERR_CONFIG;
8415 			}
8416 		}
8417 
8418 		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8419 			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8420 			return I40E_ERR_CONFIG;
8421 		}
8422 		filter->dst_ipv4 = match.key->dst;
8423 		filter->src_ipv4 = match.key->src;
8424 	}
8425 
8426 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8427 		struct flow_match_ipv6_addrs match;
8428 
8429 		flow_rule_match_ipv6_addrs(rule, &match);
8430 
8431 		/* The src and dest IPv6 addresses must not be the LOOPBACK
8432 		 * address (0:0:0:0:0:0:0:1), which can be represented as ::1
8433 		 */
8434 		if (ipv6_addr_loopback(&match.key->dst) ||
8435 		    ipv6_addr_loopback(&match.key->src)) {
8436 			dev_err(&pf->pdev->dev,
8437 				"Bad ipv6, addr is LOOPBACK\n");
8438 			return I40E_ERR_CONFIG;
8439 		}
8440 		if (!ipv6_addr_any(&match.mask->dst) ||
8441 		    !ipv6_addr_any(&match.mask->src))
8442 			field_flags |= I40E_CLOUD_FIELD_IIP;
8443 
8444 		memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8445 		       sizeof(filter->src_ipv6));
8446 		memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8447 		       sizeof(filter->dst_ipv6));
8448 	}
8449 
8450 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8451 		struct flow_match_ports match;
8452 
8453 		flow_rule_match_ports(rule, &match);
8454 		if (match.mask->src) {
8455 			if (match.mask->src == cpu_to_be16(0xffff)) {
8456 				field_flags |= I40E_CLOUD_FIELD_IIP;
8457 			} else {
8458 				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8459 					be16_to_cpu(match.mask->src));
8460 				return I40E_ERR_CONFIG;
8461 			}
8462 		}
8463 
8464 		if (match.mask->dst) {
8465 			if (match.mask->dst == cpu_to_be16(0xffff)) {
8466 				field_flags |= I40E_CLOUD_FIELD_IIP;
8467 			} else {
8468 				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8469 					be16_to_cpu(match.mask->dst));
8470 				return I40E_ERR_CONFIG;
8471 			}
8472 		}
8473 
8474 		filter->dst_port = match.key->dst;
8475 		filter->src_port = match.key->src;
8476 
8477 		switch (filter->ip_proto) {
8478 		case IPPROTO_TCP:
8479 		case IPPROTO_UDP:
8480 			break;
8481 		default:
8482 			dev_err(&pf->pdev->dev,
8483 				"Only UDP and TCP transport are supported\n");
8484 			return -EINVAL;
8485 		}
8486 	}
8487 	filter->flags = field_flags;
8488 	return 0;
8489 }
8490 
8491 /**
8492  * i40e_handle_tclass - Forward to a traffic class on the device
8493  * @vsi: Pointer to VSI
8494  * @tc: traffic class index on the device
8495  * @filter: Pointer to cloud filter structure
8496  *
8497  **/
8498 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8499 			      struct i40e_cloud_filter *filter)
8500 {
8501 	struct i40e_channel *ch, *ch_tmp;
8502 
8503 	/* direct to a traffic class on the same device */
8504 	if (tc == 0) {
8505 		filter->seid = vsi->seid;
8506 		return 0;
8507 	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8508 		if (!filter->dst_port) {
8509 			dev_err(&vsi->back->pdev->dev,
8510 				"Specify destination port to direct to traffic class that is not default\n");
8511 			return -EINVAL;
8512 		}
8513 		if (list_empty(&vsi->ch_list))
8514 			return -EINVAL;
8515 		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8516 					 list) {
8517 			if (ch->seid == vsi->tc_seid_map[tc])
8518 				filter->seid = ch->seid;
8519 		}
8520 		return 0;
8521 	}
8522 	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8523 	return -EINVAL;
8524 }
8525 
8526 /**
8527  * i40e_configure_clsflower - Configure tc flower filters
8528  * @vsi: Pointer to VSI
8529  * @cls_flower: Pointer to struct flow_cls_offload
8530  *
8531  **/
8532 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8533 				    struct flow_cls_offload *cls_flower)
8534 {
8535 	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8536 	struct i40e_cloud_filter *filter = NULL;
8537 	struct i40e_pf *pf = vsi->back;
8538 	int err = 0;
8539 
8540 	if (tc < 0) {
8541 		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8542 		return -EOPNOTSUPP;
8543 	}
8544 
8545 	if (!tc) {
8546 		dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination\n");
8547 		return -EINVAL;
8548 	}
8549 
8550 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8551 	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8552 		return -EBUSY;
8553 
8554 	if (pf->fdir_pf_active_filters ||
8555 	    (!hlist_empty(&pf->fdir_filter_list))) {
8556 		dev_err(&vsi->back->pdev->dev,
8557 			"Flow Director Sideband filters exist, turn ntuple off to configure cloud filters\n");
8558 		return -EINVAL;
8559 	}
8560 
8561 	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8562 		dev_err(&vsi->back->pdev->dev,
8563 			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8564 		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8565 		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8566 	}
8567 
8568 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8569 	if (!filter)
8570 		return -ENOMEM;
8571 
8572 	filter->cookie = cls_flower->cookie;
8573 
8574 	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8575 	if (err < 0)
8576 		goto err;
8577 
8578 	err = i40e_handle_tclass(vsi, tc, filter);
8579 	if (err < 0)
8580 		goto err;
8581 
8582 	/* Add cloud filter */
8583 	if (filter->dst_port)
8584 		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8585 	else
8586 		err = i40e_add_del_cloud_filter(vsi, filter, true);
8587 
8588 	if (err) {
8589 		dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
8590 			err);
8591 		goto err;
8592 	}
8593 
8594 	/* add filter to the ordered list */
8595 	INIT_HLIST_NODE(&filter->cloud_node);
8596 
8597 	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8598 
8599 	pf->num_cloud_filters++;
8600 
8601 	return err;
8602 err:
8603 	kfree(filter);
8604 	return err;
8605 }
8606 
8607 /**
8608  * i40e_find_cloud_filter - Find the cloud filter in the list
8609  * @vsi: Pointer to VSI
8610  * @cookie: filter specific cookie
8611  *
8612  **/
8613 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8614 							unsigned long *cookie)
8615 {
8616 	struct i40e_cloud_filter *filter = NULL;
8617 	struct hlist_node *node2;
8618 
8619 	hlist_for_each_entry_safe(filter, node2,
8620 				  &vsi->back->cloud_filter_list, cloud_node)
8621 		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8622 			return filter;
8623 	return NULL;
8624 }
8625 
8626 /**
8627  * i40e_delete_clsflower - Remove tc flower filters
8628  * @vsi: Pointer to VSI
8629  * @cls_flower: Pointer to struct flow_cls_offload
8630  *
8631  **/
8632 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8633 				 struct flow_cls_offload *cls_flower)
8634 {
8635 	struct i40e_cloud_filter *filter = NULL;
8636 	struct i40e_pf *pf = vsi->back;
8637 	int err = 0;
8638 
8639 	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8640 
8641 	if (!filter)
8642 		return -EINVAL;
8643 
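	/* Unlink the filter from the PF's cloud filter list before asking
	 * the hardware to remove it.
	 */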
8644 	hash_del(&filter->cloud_node);
8645 
8646 	if (filter->dst_port)
8647 		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8648 	else
8649 		err = i40e_add_del_cloud_filter(vsi, filter, false);
8650 
8651 	kfree(filter);
8652 	if (err) {
8653 		dev_err(&pf->pdev->dev,
8654 			"Failed to delete cloud filter, err %s\n",
8655 			i40e_stat_str(&pf->hw, err));
8656 		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8657 	}
8658 
8659 	pf->num_cloud_filters--;
8660 	if (!pf->num_cloud_filters)
8661 		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8662 		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8663 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8664 			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8665 			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8666 		}
8667 	return 0;
8668 }
8669 
8670 /**
8671  * i40e_setup_tc_cls_flower - flower classifier offloads
8672  * @np: net device to configure
8673  * @cls_flower: offload data
8674  **/
8675 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8676 				    struct flow_cls_offload *cls_flower)
8677 {
8678 	struct i40e_vsi *vsi = np->vsi;
8679 
8680 	switch (cls_flower->command) {
8681 	case FLOW_CLS_REPLACE:
8682 		return i40e_configure_clsflower(vsi, cls_flower);
8683 	case FLOW_CLS_DESTROY:
8684 		return i40e_delete_clsflower(vsi, cls_flower);
8685 	case FLOW_CLS_STATS:
8686 		return -EOPNOTSUPP;
8687 	default:
8688 		return -EOPNOTSUPP;
8689 	}
8690 }
8691 
8692 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8693 				  void *cb_priv)
8694 {
8695 	struct i40e_netdev_priv *np = cb_priv;
8696 
8697 	if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8698 		return -EOPNOTSUPP;
8699 
8700 	switch (type) {
8701 	case TC_SETUP_CLSFLOWER:
8702 		return i40e_setup_tc_cls_flower(np, type_data);
8703 
8704 	default:
8705 		return -EOPNOTSUPP;
8706 	}
8707 }
8708 
8709 static LIST_HEAD(i40e_block_cb_list);
8710 
8711 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8712 			   void *type_data)
8713 {
8714 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8715 
8716 	switch (type) {
8717 	case TC_SETUP_QDISC_MQPRIO:
8718 		return i40e_setup_tc(netdev, type_data);
8719 	case TC_SETUP_BLOCK:
8720 		return flow_block_cb_setup_simple(type_data,
8721 						  &i40e_block_cb_list,
8722 						  i40e_setup_tc_block_cb,
8723 						  np, np, true);
8724 	default:
8725 		return -EOPNOTSUPP;
8726 	}
8727 }
8728 
8729 /**
8730  * i40e_open - Called when a network interface is made active
8731  * @netdev: network interface device structure
8732  *
8733  * The open entry point is called when a network interface is made
8734  * active by the system (IFF_UP).  At this point all resources needed
8735  * for transmit and receive operations are allocated, the interrupt
8736  * handler is registered with the OS, the netdev watchdog subtask is
8737  * enabled, and the stack is notified that the interface is ready.
8738  *
8739  * Returns 0 on success, negative value on failure
8740  **/
8741 int i40e_open(struct net_device *netdev)
8742 {
8743 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8744 	struct i40e_vsi *vsi = np->vsi;
8745 	struct i40e_pf *pf = vsi->back;
8746 	int err;
8747 
8748 	/* disallow open during test or if eeprom is broken */
8749 	if (test_bit(__I40E_TESTING, pf->state) ||
8750 	    test_bit(__I40E_BAD_EEPROM, pf->state))
8751 		return -EBUSY;
8752 
8753 	netif_carrier_off(netdev);
8754 
8755 	if (i40e_force_link_state(pf, true))
8756 		return -EAGAIN;
8757 
8758 	err = i40e_vsi_open(vsi);
8759 	if (err)
8760 		return err;
8761 
8762 	/* configure global TSO hardware offload settings */
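	/* The TCP_FLAG_* constants are big-endian words with the flag bits
	 * in the upper 16 bits; convert to CPU order and shift down so only
	 * the 16-bit TCP flag field is written to the GLLAN_TSOMSK registers.
	 */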
8763 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8764 						       TCP_FLAG_FIN) >> 16);
8765 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8766 						       TCP_FLAG_FIN |
8767 						       TCP_FLAG_CWR) >> 16);
8768 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8769 	udp_tunnel_get_rx_info(netdev);
8770 
8771 	return 0;
8772 }
8773 
8774 /**
8775  * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
8776  * @vsi: vsi structure
8777  *
8778  * This updates netdev's number of tx/rx queues
8779  *
8780  * Returns status of setting tx/rx queues
8781  **/
8782 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
8783 {
8784 	int ret;
8785 
8786 	ret = netif_set_real_num_rx_queues(vsi->netdev,
8787 					   vsi->num_queue_pairs);
8788 	if (ret)
8789 		return ret;
8790 
8791 	return netif_set_real_num_tx_queues(vsi->netdev,
8792 					    vsi->num_queue_pairs);
8793 }
8794 
8795 /**
 * i40e_vsi_open - open and bring up a VSI
8797  * @vsi: the VSI to open
8798  *
8799  * Finish initialization of the VSI.
8800  *
8801  * Returns 0 on success, negative value on failure
8802  *
8803  * Note: expects to be called while under rtnl_lock()
8804  **/
8805 int i40e_vsi_open(struct i40e_vsi *vsi)
8806 {
8807 	struct i40e_pf *pf = vsi->back;
8808 	char int_name[I40E_INT_NAME_STR_LEN];
8809 	int err;
8810 
8811 	/* allocate descriptors */
8812 	err = i40e_vsi_setup_tx_resources(vsi);
8813 	if (err)
8814 		goto err_setup_tx;
8815 	err = i40e_vsi_setup_rx_resources(vsi);
8816 	if (err)
8817 		goto err_setup_rx;
8818 
8819 	err = i40e_vsi_configure(vsi);
8820 	if (err)
8821 		goto err_setup_rx;
8822 
8823 	if (vsi->netdev) {
8824 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8825 			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8826 		err = i40e_vsi_request_irq(vsi, int_name);
8827 		if (err)
8828 			goto err_setup_rx;
8829 
8830 		/* Notify the stack of the actual queue counts. */
8831 		err = i40e_netif_set_realnum_tx_rx_queues(vsi);
8832 		if (err)
8833 			goto err_set_queues;
8834 
8835 	} else if (vsi->type == I40E_VSI_FDIR) {
8836 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8837 			 dev_driver_string(&pf->pdev->dev),
8838 			 dev_name(&pf->pdev->dev));
8839 		err = i40e_vsi_request_irq(vsi, int_name);
8840 		if (err)
8841 			goto err_setup_rx;
8842 
8843 	} else {
8844 		err = -EINVAL;
8845 		goto err_setup_rx;
8846 	}
8847 
8848 	err = i40e_up_complete(vsi);
8849 	if (err)
8850 		goto err_up_complete;
8851 
8852 	return 0;
8853 
8854 err_up_complete:
8855 	i40e_down(vsi);
8856 err_set_queues:
8857 	i40e_vsi_free_irq(vsi);
8858 err_setup_rx:
8859 	i40e_vsi_free_rx_resources(vsi);
8860 err_setup_tx:
8861 	i40e_vsi_free_tx_resources(vsi);
8862 	if (vsi == pf->vsi[pf->lan_vsi])
8863 		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8864 
8865 	return err;
8866 }
8867 
8868 /**
8869  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8870  * @pf: Pointer to PF
8871  *
8872  * This function destroys the hlist where all the Flow Director
8873  * filters were saved.
8874  **/
8875 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8876 {
8877 	struct i40e_fdir_filter *filter;
8878 	struct i40e_flex_pit *pit_entry, *tmp;
8879 	struct hlist_node *node2;
8880 
8881 	hlist_for_each_entry_safe(filter, node2,
8882 				  &pf->fdir_filter_list, fdir_node) {
8883 		hlist_del(&filter->fdir_node);
8884 		kfree(filter);
8885 	}
8886 
8887 	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8888 		list_del(&pit_entry->list);
8889 		kfree(pit_entry);
8890 	}
8891 	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8892 
8893 	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8894 		list_del(&pit_entry->list);
8895 		kfree(pit_entry);
8896 	}
8897 	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8898 
8899 	pf->fdir_pf_active_filters = 0;
8900 	i40e_reset_fdir_filter_cnt(pf);
8901 
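	/* Restore the default input-set masks for every flow type below so
	 * that input-set changes made for user-defined filters do not
	 * persist once those filters are gone.
	 */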
8902 	/* Reprogram the default input set for TCP/IPv4 */
8903 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8904 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8905 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8906 
8907 	/* Reprogram the default input set for TCP/IPv6 */
8908 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8909 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8910 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8911 
8912 	/* Reprogram the default input set for UDP/IPv4 */
8913 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8914 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8915 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8916 
8917 	/* Reprogram the default input set for UDP/IPv6 */
8918 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8919 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8920 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8921 
8922 	/* Reprogram the default input set for SCTP/IPv4 */
8923 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8924 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8925 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8926 
8927 	/* Reprogram the default input set for SCTP/IPv6 */
8928 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8929 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8930 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8931 
8932 	/* Reprogram the default input set for Other/IPv4 */
8933 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8934 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8935 
8936 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8937 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8938 
8939 	/* Reprogram the default input set for Other/IPv6 */
8940 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8941 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8942 
8943 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
8944 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8945 }
8946 
8947 /**
8948  * i40e_cloud_filter_exit - Cleans up the cloud filters
8949  * @pf: Pointer to PF
8950  *
8951  * This function destroys the hlist where all the cloud filters
8952  * were saved.
8953  **/
8954 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8955 {
8956 	struct i40e_cloud_filter *cfilter;
8957 	struct hlist_node *node;
8958 
8959 	hlist_for_each_entry_safe(cfilter, node,
8960 				  &pf->cloud_filter_list, cloud_node) {
8961 		hlist_del(&cfilter->cloud_node);
8962 		kfree(cfilter);
8963 	}
8964 	pf->num_cloud_filters = 0;
8965 
8966 	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8967 	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8968 		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8969 		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8970 		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8971 	}
8972 }
8973 
8974 /**
8975  * i40e_close - Disables a network interface
8976  * @netdev: network interface device structure
8977  *
8978  * The close entry point is called when an interface is de-activated
8979  * by the OS.  The hardware is still under the driver's control, but
8980  * this netdev interface is disabled.
8981  *
8982  * Returns 0, this is not allowed to fail
8983  **/
8984 int i40e_close(struct net_device *netdev)
8985 {
8986 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8987 	struct i40e_vsi *vsi = np->vsi;
8988 
8989 	i40e_vsi_close(vsi);
8990 
8991 	return 0;
8992 }
8993 
8994 /**
8995  * i40e_do_reset - Start a PF or Core Reset sequence
8996  * @pf: board private structure
8997  * @reset_flags: which reset is requested
8998  * @lock_acquired: indicates whether or not the lock has been acquired
8999  * before this function was called.
9000  *
9001  * The essential difference in resets is that the PF Reset
9002  * doesn't clear the packet buffers, doesn't reset the PE
9003  * firmware, and doesn't bother the other PFs on the chip.
9004  **/
9005 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9006 {
9007 	u32 val;
9008 
9009 	/* do the biggest reset indicated */
9010 	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9011 
9012 		/* Request a Global Reset
9013 		 *
9014 		 * This will start the chip's countdown to the actual full
9015 		 * chip reset event, and a warning interrupt to be sent
9016 		 * to all PFs, including the requestor.  Our handler
9017 		 * for the warning interrupt will deal with the shutdown
9018 		 * and recovery of the switch setup.
9019 		 */
9020 		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9021 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9022 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9023 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9024 
9025 	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9026 
9027 		/* Request a Core Reset
9028 		 *
9029 		 * Same as Global Reset, except does *not* include the MAC/PHY
9030 		 */
9031 		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9032 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9033 		val |= I40E_GLGEN_RTRIG_CORER_MASK;
9034 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9035 		i40e_flush(&pf->hw);
9036 
9037 	} else if (reset_flags & I40E_PF_RESET_FLAG) {
9038 
9039 		/* Request a PF Reset
9040 		 *
9041 		 * Resets only the PF-specific registers
9042 		 *
9043 		 * This goes directly to the tear-down and rebuild of
9044 		 * the switch, since we need to do all the recovery as
9045 		 * for the Core Reset.
9046 		 */
9047 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
9048 		i40e_handle_reset_warning(pf, lock_acquired);
9049 
9050 	} else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9051 		/* Request a PF Reset
9052 		 *
9053 		 * Resets PF and reinitializes PFs VSI.
9054 		 */
9055 		i40e_prep_for_reset(pf);
9056 		i40e_reset_and_rebuild(pf, true, lock_acquired);
9057 		dev_info(&pf->pdev->dev,
9058 			 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
9059 			 "FW LLDP is disabled\n" :
9060 			 "FW LLDP is enabled\n");
9061 
9062 	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9063 		int v;
9064 
9065 		/* Find the VSI(s) that requested a re-init */
9066 		dev_info(&pf->pdev->dev,
9067 			 "VSI reinit requested\n");
9068 		for (v = 0; v < pf->num_alloc_vsi; v++) {
9069 			struct i40e_vsi *vsi = pf->vsi[v];
9070 
9071 			if (vsi != NULL &&
9072 			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9073 					       vsi->state))
9074 				i40e_vsi_reinit_locked(pf->vsi[v]);
9075 		}
9076 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9077 		int v;
9078 
		/* Find the VSI(s) that need to be brought down */
9080 		dev_info(&pf->pdev->dev, "VSI down requested\n");
9081 		for (v = 0; v < pf->num_alloc_vsi; v++) {
9082 			struct i40e_vsi *vsi = pf->vsi[v];
9083 
9084 			if (vsi != NULL &&
9085 			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9086 					       vsi->state)) {
9087 				set_bit(__I40E_VSI_DOWN, vsi->state);
9088 				i40e_down(vsi);
9089 			}
9090 		}
9091 	} else {
9092 		dev_info(&pf->pdev->dev,
9093 			 "bad reset request 0x%08x\n", reset_flags);
9094 	}
9095 }
9096 
9097 #ifdef CONFIG_I40E_DCB
9098 /**
9099  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9100  * @pf: board private structure
9101  * @old_cfg: current DCB config
9102  * @new_cfg: new DCB config
9103  **/
9104 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9105 			    struct i40e_dcbx_config *old_cfg,
9106 			    struct i40e_dcbx_config *new_cfg)
9107 {
9108 	bool need_reconfig = false;
9109 
9110 	/* Check if ETS configuration has changed */
9111 	if (memcmp(&new_cfg->etscfg,
9112 		   &old_cfg->etscfg,
9113 		   sizeof(new_cfg->etscfg))) {
9114 		/* If Priority Table has changed reconfig is needed */
9115 		if (memcmp(&new_cfg->etscfg.prioritytable,
9116 			   &old_cfg->etscfg.prioritytable,
9117 			   sizeof(new_cfg->etscfg.prioritytable))) {
9118 			need_reconfig = true;
9119 			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9120 		}
9121 
9122 		if (memcmp(&new_cfg->etscfg.tcbwtable,
9123 			   &old_cfg->etscfg.tcbwtable,
9124 			   sizeof(new_cfg->etscfg.tcbwtable)))
9125 			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9126 
9127 		if (memcmp(&new_cfg->etscfg.tsatable,
9128 			   &old_cfg->etscfg.tsatable,
9129 			   sizeof(new_cfg->etscfg.tsatable)))
9130 			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9131 	}
9132 
9133 	/* Check if PFC configuration has changed */
9134 	if (memcmp(&new_cfg->pfc,
9135 		   &old_cfg->pfc,
9136 		   sizeof(new_cfg->pfc))) {
9137 		need_reconfig = true;
9138 		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9139 	}
9140 
9141 	/* Check if APP Table has changed */
9142 	if (memcmp(&new_cfg->app,
9143 		   &old_cfg->app,
9144 		   sizeof(new_cfg->app))) {
9145 		need_reconfig = true;
9146 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9147 	}
9148 
9149 	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9150 	return need_reconfig;
9151 }
9152 
9153 /**
9154  * i40e_handle_lldp_event - Handle LLDP Change MIB event
9155  * @pf: board private structure
9156  * @e: event info posted on ARQ
9157  **/
9158 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9159 				  struct i40e_arq_event_info *e)
9160 {
9161 	struct i40e_aqc_lldp_get_mib *mib =
9162 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9163 	struct i40e_hw *hw = &pf->hw;
9164 	struct i40e_dcbx_config tmp_dcbx_cfg;
9165 	bool need_reconfig = false;
9166 	int ret = 0;
9167 	u8 type;
9168 
9169 	/* X710-T*L 2.5G and 5G speeds don't support DCB */
9170 	if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9171 	    (hw->phy.link_info.link_speed &
9172 	     ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9173 	     !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9174 		/* let firmware decide if the DCB should be disabled */
9175 		pf->flags |= I40E_FLAG_DCB_CAPABLE;
9176 
9177 	/* Not DCB capable or capability disabled */
9178 	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9179 		return ret;
9180 
9181 	/* Ignore if event is not for Nearest Bridge */
9182 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9183 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9184 	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9185 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9186 		return ret;
9187 
9188 	/* Check MIB Type and return if event for Remote MIB update */
9189 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9190 	dev_dbg(&pf->pdev->dev,
9191 		"LLDP event mib type %s\n", type ? "remote" : "local");
9192 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9193 		/* Update the remote cached instance and return */
9194 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9195 				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9196 				&hw->remote_dcbx_config);
9197 		goto exit;
9198 	}
9199 
9200 	/* Store the old configuration */
9201 	tmp_dcbx_cfg = hw->local_dcbx_config;
9202 
9203 	/* Reset the old DCBx configuration data */
9204 	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9205 	/* Get updated DCBX data from firmware */
9206 	ret = i40e_get_dcb_config(&pf->hw);
9207 	if (ret) {
9208 		/* X710-T*L 2.5G and 5G speeds don't support DCB */
9209 		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9210 		    (hw->phy.link_info.link_speed &
9211 		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9212 			dev_warn(&pf->pdev->dev,
9213 				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9214 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9215 		} else {
9216 			dev_info(&pf->pdev->dev,
9217 				 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
9218 				 i40e_stat_str(&pf->hw, ret),
9219 				 i40e_aq_str(&pf->hw,
9220 					     pf->hw.aq.asq_last_status));
9221 		}
9222 		goto exit;
9223 	}
9224 
9225 	/* No change detected in DCBX configs */
9226 	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9227 		    sizeof(tmp_dcbx_cfg))) {
9228 		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9229 		goto exit;
9230 	}
9231 
9232 	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9233 					       &hw->local_dcbx_config);
9234 
9235 	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9236 
9237 	if (!need_reconfig)
9238 		goto exit;
9239 
9240 	/* Enable DCB tagging only when more than one TC */
9241 	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9242 		pf->flags |= I40E_FLAG_DCB_ENABLED;
9243 	else
9244 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9245 
9246 	set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed; quiesce all VSIs */
9248 	i40e_pf_quiesce_all_vsi(pf);
9249 
9250 	/* Changes in configuration update VEB/VSI */
9251 	i40e_dcb_reconfigure(pf);
9252 
9253 	ret = i40e_resume_port_tx(pf);
9254 
9255 	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9256 	/* In case of error no point in resuming VSIs */
9257 	if (ret)
9258 		goto exit;
9259 
9260 	/* Wait for the PF's queues to be disabled */
9261 	ret = i40e_pf_wait_queues_disabled(pf);
9262 	if (ret) {
9263 		/* Schedule PF reset to recover */
9264 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9265 		i40e_service_event_schedule(pf);
9266 	} else {
9267 		i40e_pf_unquiesce_all_vsi(pf);
9268 		set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9269 		set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9270 	}
9271 
9272 exit:
9273 	return ret;
9274 }
9275 #endif /* CONFIG_I40E_DCB */
9276 
9277 /**
9278  * i40e_do_reset_safe - Protected reset path for userland calls.
9279  * @pf: board private structure
9280  * @reset_flags: which reset is requested
9281  *
9282  **/
9283 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9284 {
9285 	rtnl_lock();
9286 	i40e_do_reset(pf, reset_flags, true);
9287 	rtnl_unlock();
9288 }
9289 
9290 /**
9291  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9292  * @pf: board private structure
9293  * @e: event info posted on ARQ
9294  *
9295  * Handler for LAN Queue Overflow Event generated by the firmware for PF
9296  * and VF queues
9297  **/
9298 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9299 					   struct i40e_arq_event_info *e)
9300 {
9301 	struct i40e_aqc_lan_overflow *data =
9302 		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9303 	u32 queue = le32_to_cpu(data->prtdcb_rupto);
9304 	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9305 	struct i40e_hw *hw = &pf->hw;
9306 	struct i40e_vf *vf;
9307 	u16 vf_id;
9308 
9309 	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9310 		queue, qtx_ctl);
9311 
9312 	/* Queue belongs to VF, find the VF and issue VF reset */
9313 	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9314 	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
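		/* QTX_CTL reports the absolute VF index; subtract the VF base
		 * ID below to get the index relative to this PF.
		 */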
9315 		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9316 			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9317 		vf_id -= hw->func_caps.vf_base_id;
9318 		vf = &pf->vf[vf_id];
9319 		i40e_vc_notify_vf_reset(vf);
9320 		/* Allow VF to process pending reset notification */
9321 		msleep(20);
9322 		i40e_reset_vf(vf, false);
9323 	}
9324 }
9325 
9326 /**
9327  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9328  * @pf: board private structure
9329  **/
9330 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9331 {
9332 	u32 val, fcnt_prog;
9333 
9334 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9335 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9336 	return fcnt_prog;
9337 }
9338 
9339 /**
9340  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9341  * @pf: board private structure
9342  **/
9343 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9344 {
9345 	u32 val, fcnt_prog;
9346 
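	/* Total programmed filters = guaranteed-space filters plus
	 * best-effort filters reported by PFQF_FDSTAT.
	 */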
9347 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9348 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9349 		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9350 		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9351 	return fcnt_prog;
9352 }
9353 
9354 /**
9355  * i40e_get_global_fd_count - Get total FD filters programmed on device
9356  * @pf: board private structure
9357  **/
9358 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9359 {
9360 	u32 val, fcnt_prog;
9361 
9362 	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9363 	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9364 		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9365 		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
9366 	return fcnt_prog;
9367 }
9368 
9369 /**
9370  * i40e_reenable_fdir_sb - Restore FDir SB capability
9371  * @pf: board private structure
9372  **/
9373 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9374 {
9375 	if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9376 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9377 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
9378 			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9379 }
9380 
9381 /**
9382  * i40e_reenable_fdir_atr - Restore FDir ATR capability
9383  * @pf: board private structure
9384  **/
9385 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9386 {
9387 	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9388 		/* ATR uses the same filtering logic as SB rules. It only
9389 		 * functions properly if the input set mask is at the default
9390 		 * settings. It is safe to restore the default input set
9391 		 * because there are no active TCPv4 filter rules.
9392 		 */
9393 		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9394 					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9395 					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9396 
9397 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9398 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
9399 			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9400 	}
9401 }
9402 
9403 /**
9404  * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9405  * @pf: board private structure
9406  * @filter: FDir filter to remove
9407  */
9408 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9409 				       struct i40e_fdir_filter *filter)
9410 {
9411 	/* Update counters */
9412 	pf->fdir_pf_active_filters--;
9413 	pf->fd_inv = 0;
9414 
9415 	switch (filter->flow_type) {
9416 	case TCP_V4_FLOW:
9417 		pf->fd_tcp4_filter_cnt--;
9418 		break;
9419 	case UDP_V4_FLOW:
9420 		pf->fd_udp4_filter_cnt--;
9421 		break;
9422 	case SCTP_V4_FLOW:
9423 		pf->fd_sctp4_filter_cnt--;
9424 		break;
9425 	case TCP_V6_FLOW:
9426 		pf->fd_tcp6_filter_cnt--;
9427 		break;
9428 	case UDP_V6_FLOW:
9429 		pf->fd_udp6_filter_cnt--;
9430 		break;
9431 	case SCTP_V6_FLOW:
		pf->fd_sctp6_filter_cnt--;
9433 		break;
9434 	case IP_USER_FLOW:
9435 		switch (filter->ipl4_proto) {
9436 		case IPPROTO_TCP:
9437 			pf->fd_tcp4_filter_cnt--;
9438 			break;
9439 		case IPPROTO_UDP:
9440 			pf->fd_udp4_filter_cnt--;
9441 			break;
9442 		case IPPROTO_SCTP:
9443 			pf->fd_sctp4_filter_cnt--;
9444 			break;
9445 		case IPPROTO_IP:
9446 			pf->fd_ip4_filter_cnt--;
9447 			break;
9448 		}
9449 		break;
9450 	case IPV6_USER_FLOW:
9451 		switch (filter->ipl4_proto) {
9452 		case IPPROTO_TCP:
9453 			pf->fd_tcp6_filter_cnt--;
9454 			break;
9455 		case IPPROTO_UDP:
9456 			pf->fd_udp6_filter_cnt--;
9457 			break;
9458 		case IPPROTO_SCTP:
9459 			pf->fd_sctp6_filter_cnt--;
9460 			break;
9461 		case IPPROTO_IP:
9462 			pf->fd_ip6_filter_cnt--;
9463 			break;
9464 		}
9465 		break;
9466 	}
9467 
9468 	/* Remove the filter from the list and free memory */
9469 	hlist_del(&filter->fdir_node);
9470 	kfree(filter);
9471 }
9472 
9473 /**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9475  * @pf: board private structure
9476  **/
9477 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9478 {
9479 	struct i40e_fdir_filter *filter;
9480 	u32 fcnt_prog, fcnt_avail;
9481 	struct hlist_node *node;
9482 
9483 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9484 		return;
9485 
9486 	/* Check if we have enough room to re-enable FDir SB capability. */
9487 	fcnt_prog = i40e_get_global_fd_count(pf);
9488 	fcnt_avail = pf->fdir_pf_filter_count;
9489 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9490 	    (pf->fd_add_err == 0) ||
9491 	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9492 		i40e_reenable_fdir_sb(pf);
9493 
9494 	/* We should wait for even more space before re-enabling ATR.
9495 	 * Additionally, we cannot enable ATR as long as we still have TCP SB
9496 	 * rules active.
9497 	 */
9498 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9499 	    pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9500 		i40e_reenable_fdir_atr(pf);
9501 
9502 	/* if hw had a problem adding a filter, delete it */
9503 	if (pf->fd_inv > 0) {
9504 		hlist_for_each_entry_safe(filter, node,
9505 					  &pf->fdir_filter_list, fdir_node)
9506 			if (filter->fd_id == pf->fd_inv)
9507 				i40e_delete_invalid_filter(pf, filter);
9508 	}
9509 }
9510 
9511 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9512 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9513 /**
9514  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9515  * @pf: board private structure
9516  **/
9517 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9518 {
9519 	unsigned long min_flush_time;
9520 	int flush_wait_retry = 50;
9521 	bool disable_atr = false;
9522 	int fd_room;
9523 	int reg;
9524 
9525 	if (!time_after(jiffies, pf->fd_flush_timestamp +
9526 				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9527 		return;
9528 
	/* If the flush is happening too quickly and we have mostly SB rules,
	 * we should not re-enable ATR for some time.
9531 	 */
9532 	min_flush_time = pf->fd_flush_timestamp +
9533 			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9534 	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9535 
9536 	if (!(time_after(jiffies, min_flush_time)) &&
9537 	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9538 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
9539 			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9540 		disable_atr = true;
9541 	}
9542 
9543 	pf->fd_flush_timestamp = jiffies;
9544 	set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9545 	/* flush all filters */
9546 	wr32(&pf->hw, I40E_PFQF_CTL_1,
9547 	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9548 	i40e_flush(&pf->hw);
9549 	pf->fd_flush_cnt++;
9550 	pf->fd_add_err = 0;
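	/* Poll for the clear-table bit to drop; 50 retries at 5-6 ms each
	 * gives the hardware roughly 300 ms to finish the flush.
	 */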
9551 	do {
9552 		/* Check FD flush status every 5-6msec */
9553 		usleep_range(5000, 6000);
9554 		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9555 		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9556 			break;
9557 	} while (flush_wait_retry--);
9558 	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9559 		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9560 	} else {
9561 		/* replay sideband filters */
9562 		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9563 		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9564 			clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9565 		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9566 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
9567 			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9568 	}
9569 }
9570 
9571 /**
9572  * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9573  * @pf: board private structure
9574  **/
9575 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9576 {
9577 	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9578 }
9579 
9580 /**
9581  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9582  * @pf: board private structure
9583  **/
9584 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9585 {
9587 	/* if interface is down do nothing */
9588 	if (test_bit(__I40E_DOWN, pf->state))
9589 		return;
9590 
9591 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9592 		i40e_fdir_flush_and_replay(pf);
9593 
9594 	i40e_fdir_check_and_reenable(pf);
9596 }
9597 
9598 /**
9599  * i40e_vsi_link_event - notify VSI of a link event
9600  * @vsi: vsi to be notified
9601  * @link_up: link up or down
9602  **/
9603 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9604 {
9605 	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9606 		return;
9607 
9608 	switch (vsi->type) {
9609 	case I40E_VSI_MAIN:
9610 		if (!vsi->netdev || !vsi->netdev_registered)
9611 			break;
9612 
9613 		if (link_up) {
9614 			netif_carrier_on(vsi->netdev);
9615 			netif_tx_wake_all_queues(vsi->netdev);
9616 		} else {
9617 			netif_carrier_off(vsi->netdev);
9618 			netif_tx_stop_all_queues(vsi->netdev);
9619 		}
9620 		break;
9621 
9622 	case I40E_VSI_SRIOV:
9623 	case I40E_VSI_VMDQ2:
9624 	case I40E_VSI_CTRL:
9625 	case I40E_VSI_IWARP:
9626 	case I40E_VSI_MIRROR:
9627 	default:
9628 		/* there is no notification for other VSIs */
9629 		break;
9630 	}
9631 }
9632 
9633 /**
9634  * i40e_veb_link_event - notify elements on the veb of a link event
9635  * @veb: veb to be notified
9636  * @link_up: link up or down
9637  **/
9638 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9639 {
9640 	struct i40e_pf *pf;
9641 	int i;
9642 
9643 	if (!veb || !veb->pf)
9644 		return;
9645 	pf = veb->pf;
9646 
9647 	/* depth first... */
9648 	for (i = 0; i < I40E_MAX_VEB; i++)
9649 		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9650 			i40e_veb_link_event(pf->veb[i], link_up);
9651 
9652 	/* ... now the local VSIs */
9653 	for (i = 0; i < pf->num_alloc_vsi; i++)
9654 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9655 			i40e_vsi_link_event(pf->vsi[i], link_up);
9656 }
9657 
9658 /**
9659  * i40e_link_event - Update netif_carrier status
9660  * @pf: board private structure
9661  **/
9662 static void i40e_link_event(struct i40e_pf *pf)
9663 {
9664 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9665 	u8 new_link_speed, old_link_speed;
9666 	i40e_status status;
9667 	bool new_link, old_link;
9668 #ifdef CONFIG_I40E_DCB
9669 	int err;
9670 #endif /* CONFIG_I40E_DCB */
9671 
9672 	/* set this to force the get_link_status call to refresh state */
9673 	pf->hw.phy.get_link_info = true;
9674 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9675 	status = i40e_get_link_status(&pf->hw, &new_link);
9676 
9677 	/* On success, disable temp link polling */
9678 	if (status == I40E_SUCCESS) {
9679 		clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9680 	} else {
9681 		/* Enable link polling temporarily until i40e_get_link_status
9682 		 * returns I40E_SUCCESS
9683 		 */
9684 		set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9685 		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9686 			status);
9687 		return;
9688 	}
9689 
9690 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
9691 	new_link_speed = pf->hw.phy.link_info.link_speed;
9692 
9693 	if (new_link == old_link &&
9694 	    new_link_speed == old_link_speed &&
9695 	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9696 	     new_link == netif_carrier_ok(vsi->netdev)))
9697 		return;
9698 
9699 	i40e_print_link_message(vsi, new_link);
9700 
9701 	/* Notify the base of the switch tree connected to
9702 	 * the link.  Floating VEBs are not notified.
9703 	 */
9704 	if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9705 		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9706 	else
9707 		i40e_vsi_link_event(vsi, new_link);
9708 
9709 	if (pf->vf)
9710 		i40e_vc_notify_link_state(pf);
9711 
9712 	if (pf->flags & I40E_FLAG_PTP)
9713 		i40e_ptp_set_increment(pf);
9714 #ifdef CONFIG_I40E_DCB
9715 	if (new_link == old_link)
9716 		return;
9717 	/* Not SW DCB so firmware will take care of default settings */
9718 	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9719 		return;
9720 
	/* Only handle link down here; after link up, in the SW DCB case,
	 * the SW LLDP agent takes care of reconfiguring DCB.
	 */
9724 	if (!new_link) {
9725 		dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9726 		memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9727 		err = i40e_dcb_sw_default_config(pf);
9728 		if (err) {
9729 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9730 				       I40E_FLAG_DCB_ENABLED);
9731 		} else {
9732 			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9733 				       DCB_CAP_DCBX_VER_IEEE;
9734 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
9735 			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9736 		}
9737 	}
9738 #endif /* CONFIG_I40E_DCB */
9739 }
9740 
9741 /**
9742  * i40e_watchdog_subtask - periodic checks not using event driven response
9743  * @pf: board private structure
9744  **/
9745 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9746 {
9747 	int i;
9748 
9749 	/* if interface is down do nothing */
9750 	if (test_bit(__I40E_DOWN, pf->state) ||
9751 	    test_bit(__I40E_CONFIG_BUSY, pf->state))
9752 		return;
9753 
9754 	/* make sure we don't do these things too often */
9755 	if (time_before(jiffies, (pf->service_timer_previous +
9756 				  pf->service_timer_period)))
9757 		return;
9758 	pf->service_timer_previous = jiffies;
9759 
9760 	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9761 	    test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9762 		i40e_link_event(pf);
9763 
9764 	/* Update the stats for active netdevs so the network stack
9765 	 * can look at updated numbers whenever it cares to
9766 	 */
9767 	for (i = 0; i < pf->num_alloc_vsi; i++)
9768 		if (pf->vsi[i] && pf->vsi[i]->netdev)
9769 			i40e_update_stats(pf->vsi[i]);
9770 
9771 	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9772 		/* Update the stats for the active switching components */
9773 		for (i = 0; i < I40E_MAX_VEB; i++)
9774 			if (pf->veb[i])
9775 				i40e_update_veb_stats(pf->veb[i]);
9776 	}
9777 
9778 	i40e_ptp_rx_hang(pf);
9779 	i40e_ptp_tx_hang(pf);
9780 }
9781 
9782 /**
9783  * i40e_reset_subtask - Set up for resetting the device and driver
9784  * @pf: board private structure
9785  **/
9786 static void i40e_reset_subtask(struct i40e_pf *pf)
9787 {
9788 	u32 reset_flags = 0;
9789 
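	/* Gather all reset requests posted since the last service task run,
	 * clearing each request bit as it is collected.
	 */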
9790 	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9791 		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9792 		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9793 	}
9794 	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9795 		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9796 		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9797 	}
9798 	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9799 		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9800 		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9801 	}
9802 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9803 		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9804 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9805 	}
9806 	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9807 		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9808 		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9809 	}
9810 
	/* If there's a recovery already waiting, it takes
	 * precedence over starting a new reset sequence.
9813 	 */
9814 	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9815 		i40e_prep_for_reset(pf);
9816 		i40e_reset(pf);
9817 		i40e_rebuild(pf, false, false);
9818 	}
9819 
	/* If a reset was requested and we're not already down or
	 * resetting, go ahead and do it now.
	 */
9821 	if (reset_flags &&
9822 	    !test_bit(__I40E_DOWN, pf->state) &&
9823 	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9824 		i40e_do_reset(pf, reset_flags, false);
9825 	}
9826 }
9827 
9828 /**
9829  * i40e_handle_link_event - Handle link event
9830  * @pf: board private structure
9831  * @e: event info posted on ARQ
9832  **/
9833 static void i40e_handle_link_event(struct i40e_pf *pf,
9834 				   struct i40e_arq_event_info *e)
9835 {
9836 	struct i40e_aqc_get_link_status *status =
9837 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9838 
9839 	/* Do a new status request to re-enable LSE reporting
9840 	 * and load new status information into the hw struct
9841 	 * This completely ignores any state information
9842 	 * in the ARQ event info, instead choosing to always
9843 	 * issue the AQ update link status command.
9844 	 */
9845 	i40e_link_event(pf);
9846 
9847 	/* Check if module meets thermal requirements */
9848 	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9849 		dev_err(&pf->pdev->dev,
9850 			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9851 		dev_err(&pf->pdev->dev,
9852 			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9853 	} else {
		/* Check for an unqualified module; if the link is down,
		 * suppress the message when the link was forced down.
		 */
9857 		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9858 		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9859 		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
9860 		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9861 			dev_err(&pf->pdev->dev,
9862 				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9863 			dev_err(&pf->pdev->dev,
9864 				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9865 		}
9866 	}
9867 }
9868 
9869 /**
9870  * i40e_clean_adminq_subtask - Clean the AdminQ rings
9871  * @pf: board private structure
9872  **/
9873 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9874 {
9875 	struct i40e_arq_event_info event;
9876 	struct i40e_hw *hw = &pf->hw;
9877 	u16 pending, i = 0;
9878 	i40e_status ret;
9879 	u16 opcode;
9880 	u32 oldval;
9881 	u32 val;
9882 
9883 	/* Do not run clean AQ when PF reset fails */
9884 	if (test_bit(__I40E_RESET_FAILED, pf->state))
9885 		return;
9886 
9887 	/* check for error indications */
9888 	val = rd32(&pf->hw, pf->hw.aq.arq.len);
9889 	oldval = val;
9890 	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9891 		if (hw->debug_mask & I40E_DEBUG_AQ)
9892 			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9893 		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9894 	}
9895 	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9896 		if (hw->debug_mask & I40E_DEBUG_AQ)
9897 			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9898 		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9899 		pf->arq_overflows++;
9900 	}
9901 	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9902 		if (hw->debug_mask & I40E_DEBUG_AQ)
9903 			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9904 		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9905 	}
9906 	if (oldval != val)
9907 		wr32(&pf->hw, pf->hw.aq.arq.len, val);
9908 
9909 	val = rd32(&pf->hw, pf->hw.aq.asq.len);
9910 	oldval = val;
9911 	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9912 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9913 			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9914 		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9915 	}
9916 	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9917 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9918 			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9919 		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9920 	}
9921 	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9922 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9923 			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9924 		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9925 	}
9926 	if (oldval != val)
9927 		wr32(&pf->hw, pf->hw.aq.asq.len, val);
9928 
9929 	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9930 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9931 	if (!event.msg_buf)
9932 		return;
9933 
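	/* Drain the admin receive queue, bounding the work done in one pass
	 * by adminq_work_limit.
	 */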
9934 	do {
9935 		ret = i40e_clean_arq_element(hw, &event, &pending);
9936 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9937 			break;
9938 		else if (ret) {
9939 			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9940 			break;
9941 		}
9942 
9943 		opcode = le16_to_cpu(event.desc.opcode);
9944 		switch (opcode) {
9945 
9946 		case i40e_aqc_opc_get_link_status:
9947 			rtnl_lock();
9948 			i40e_handle_link_event(pf, &event);
9949 			rtnl_unlock();
9950 			break;
9951 		case i40e_aqc_opc_send_msg_to_pf:
9952 			ret = i40e_vc_process_vf_msg(pf,
9953 					le16_to_cpu(event.desc.retval),
9954 					le32_to_cpu(event.desc.cookie_high),
9955 					le32_to_cpu(event.desc.cookie_low),
9956 					event.msg_buf,
9957 					event.msg_len);
9958 			break;
9959 		case i40e_aqc_opc_lldp_update_mib:
9960 			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9961 #ifdef CONFIG_I40E_DCB
9962 			rtnl_lock();
9963 			i40e_handle_lldp_event(pf, &event);
9964 			rtnl_unlock();
9965 #endif /* CONFIG_I40E_DCB */
9966 			break;
9967 		case i40e_aqc_opc_event_lan_overflow:
9968 			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9969 			i40e_handle_lan_overflow_event(pf, &event);
9970 			break;
9971 		case i40e_aqc_opc_send_msg_to_peer:
9972 			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9973 			break;
9974 		case i40e_aqc_opc_nvm_erase:
9975 		case i40e_aqc_opc_nvm_update:
9976 		case i40e_aqc_opc_oem_post_update:
9977 			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9978 				   "ARQ NVM operation 0x%04x completed\n",
9979 				   opcode);
9980 			break;
9981 		default:
9982 			dev_info(&pf->pdev->dev,
9983 				 "ARQ: Unknown event 0x%04x ignored\n",
9984 				 opcode);
9985 			break;
9986 		}
9987 	} while (i++ < pf->adminq_work_limit);
9988 
9989 	if (i < pf->adminq_work_limit)
9990 		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9991 
9992 	/* re-enable Admin queue interrupt cause */
9993 	val = rd32(hw, I40E_PFINT_ICR0_ENA);
9994 	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9995 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
9996 	i40e_flush(hw);
9997 
9998 	kfree(event.msg_buf);
9999 }
10000 
10001 /**
10002  * i40e_verify_eeprom - make sure eeprom is good to use
10003  * @pf: board private structure
10004  **/
10005 static void i40e_verify_eeprom(struct i40e_pf *pf)
10006 {
10007 	int err;
10008 
10009 	err = i40e_diag_eeprom_test(&pf->hw);
10010 	if (err) {
10011 		/* retry in case of garbage read */
10012 		err = i40e_diag_eeprom_test(&pf->hw);
10013 		if (err) {
10014 			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10015 				 err);
10016 			set_bit(__I40E_BAD_EEPROM, pf->state);
10017 		}
10018 	}
10019 
10020 	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10021 		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10022 		clear_bit(__I40E_BAD_EEPROM, pf->state);
10023 	}
10024 }
10025 
10026 /**
10027  * i40e_enable_pf_switch_lb
10028  * @pf: pointer to the PF structure
10029  *
10030  * enable switch loop back or die - no point in a return value
10031  **/
10032 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10033 {
10034 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10035 	struct i40e_vsi_context ctxt;
10036 	int ret;
10037 
10038 	ctxt.seid = pf->main_vsi_seid;
10039 	ctxt.pf_num = pf->hw.pf_id;
10040 	ctxt.vf_num = 0;
10041 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10042 	if (ret) {
10043 		dev_info(&pf->pdev->dev,
10044 			 "couldn't get PF vsi config, err %s aq_err %s\n",
10045 			 i40e_stat_str(&pf->hw, ret),
10046 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10047 		return;
10048 	}
10049 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10050 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10051 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10052 
10053 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10054 	if (ret) {
10055 		dev_info(&pf->pdev->dev,
10056 			 "update vsi switch failed, err %s aq_err %s\n",
10057 			 i40e_stat_str(&pf->hw, ret),
10058 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10059 	}
10060 }
10061 
10062 /**
10063  * i40e_disable_pf_switch_lb
10064  * @pf: pointer to the PF structure
10065  *
10066  * disable switch loop back or die - no point in a return value
10067  **/
10068 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10069 {
10070 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10071 	struct i40e_vsi_context ctxt;
10072 	int ret;
10073 
10074 	ctxt.seid = pf->main_vsi_seid;
10075 	ctxt.pf_num = pf->hw.pf_id;
10076 	ctxt.vf_num = 0;
10077 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10078 	if (ret) {
10079 		dev_info(&pf->pdev->dev,
10080 			 "couldn't get PF vsi config, err %s aq_err %s\n",
10081 			 i40e_stat_str(&pf->hw, ret),
10082 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10083 		return;
10084 	}
10085 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10086 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10087 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10088 
10089 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10090 	if (ret) {
10091 		dev_info(&pf->pdev->dev,
10092 			 "update vsi switch failed, err %s aq_err %s\n",
10093 			 i40e_stat_str(&pf->hw, ret),
10094 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10095 	}
10096 }
10097 
10098 /**
10099  * i40e_config_bridge_mode - Configure the HW bridge mode
10100  * @veb: pointer to the bridge instance
10101  *
10102  * Configure the loop back mode for the LAN VSI that is downlink to the
10103  * specified HW bridge instance. It is expected this function is called
10104  * when a new HW bridge is instantiated.
10105  **/
10106 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10107 {
10108 	struct i40e_pf *pf = veb->pf;
10109 
10110 	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10111 		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10112 			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10113 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10114 		i40e_disable_pf_switch_lb(pf);
10115 	else
10116 		i40e_enable_pf_switch_lb(pf);
10117 }
10118 
10119 /**
10120  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10121  * @veb: pointer to the VEB instance
10122  *
 * This is a recursive function that first builds the attached VSIs, then
 * recurses to build the next layer of VEBs.  We track the connections
 * through our own index numbers because the SEIDs from the HW could
 * change across the reset.
10127  **/
10128 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10129 {
10130 	struct i40e_vsi *ctl_vsi = NULL;
10131 	struct i40e_pf *pf = veb->pf;
10132 	int v, veb_idx;
10133 	int ret;
10134 
10135 	/* build VSI that owns this VEB, temporarily attached to base VEB */
10136 	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10137 		if (pf->vsi[v] &&
10138 		    pf->vsi[v]->veb_idx == veb->idx &&
10139 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10140 			ctl_vsi = pf->vsi[v];
10141 			break;
10142 		}
10143 	}
10144 	if (!ctl_vsi) {
10145 		dev_info(&pf->pdev->dev,
10146 			 "missing owner VSI for veb_idx %d\n", veb->idx);
10147 		ret = -ENOENT;
10148 		goto end_reconstitute;
10149 	}
10150 	if (ctl_vsi != pf->vsi[pf->lan_vsi])
10151 		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10152 	ret = i40e_add_vsi(ctl_vsi);
10153 	if (ret) {
10154 		dev_info(&pf->pdev->dev,
10155 			 "rebuild of veb_idx %d owner VSI failed: %d\n",
10156 			 veb->idx, ret);
10157 		goto end_reconstitute;
10158 	}
10159 	i40e_vsi_reset_stats(ctl_vsi);
10160 
10161 	/* create the VEB in the switch and move the VSI onto the VEB */
10162 	ret = i40e_add_veb(veb, ctl_vsi);
10163 	if (ret)
10164 		goto end_reconstitute;
10165 
10166 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10167 		veb->bridge_mode = BRIDGE_MODE_VEB;
10168 	else
10169 		veb->bridge_mode = BRIDGE_MODE_VEPA;
10170 	i40e_config_bridge_mode(veb);
10171 
10172 	/* create the remaining VSIs attached to this VEB */
10173 	for (v = 0; v < pf->num_alloc_vsi; v++) {
10174 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10175 			continue;
10176 
10177 		if (pf->vsi[v]->veb_idx == veb->idx) {
10178 			struct i40e_vsi *vsi = pf->vsi[v];
10179 
10180 			vsi->uplink_seid = veb->seid;
10181 			ret = i40e_add_vsi(vsi);
10182 			if (ret) {
10183 				dev_info(&pf->pdev->dev,
10184 					 "rebuild of vsi_idx %d failed: %d\n",
10185 					 v, ret);
10186 				goto end_reconstitute;
10187 			}
10188 			i40e_vsi_reset_stats(vsi);
10189 		}
10190 	}
10191 
10192 	/* create any VEBs attached to this VEB - RECURSION */
10193 	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10194 		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10195 			pf->veb[veb_idx]->uplink_seid = veb->seid;
10196 			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10197 			if (ret)
10198 				break;
10199 		}
10200 	}
10201 
10202 end_reconstitute:
10203 	return ret;
10204 }
10205 
10206 /**
10207  * i40e_get_capabilities - get info about the HW
10208  * @pf: the PF struct
10209  * @list_type: AQ capability to be queried
10210  **/
10211 static int i40e_get_capabilities(struct i40e_pf *pf,
10212 				 enum i40e_admin_queue_opc list_type)
10213 {
10214 	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10215 	u16 data_size;
10216 	int buf_len;
10217 	int err;
10218 
10219 	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
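	/* Start with room for 40 capability entries; if firmware reports
	 * ENOMEM, data_size returns the required size and we retry with a
	 * larger buffer.
	 */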
10220 	do {
10221 		cap_buf = kzalloc(buf_len, GFP_KERNEL);
10222 		if (!cap_buf)
10223 			return -ENOMEM;
10224 
10225 		/* this loads the data into the hw struct for us */
10226 		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10227 						    &data_size, list_type,
10228 						    NULL);
10229 		/* data loaded, buffer no longer needed */
10230 		kfree(cap_buf);
10231 
10232 		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10233 			/* retry with a larger buffer */
10234 			buf_len = data_size;
10235 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10236 			dev_info(&pf->pdev->dev,
10237 				 "capability discovery failed, err %s aq_err %s\n",
10238 				 i40e_stat_str(&pf->hw, err),
10239 				 i40e_aq_str(&pf->hw,
10240 					     pf->hw.aq.asq_last_status));
10241 			return -ENODEV;
10242 		}
10243 	} while (err);
10244 
10245 	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10246 		if (list_type == i40e_aqc_opc_list_func_capabilities) {
10247 			dev_info(&pf->pdev->dev,
10248 				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10249 				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10250 				 pf->hw.func_caps.num_msix_vectors,
10251 				 pf->hw.func_caps.num_msix_vectors_vf,
10252 				 pf->hw.func_caps.fd_filters_guaranteed,
10253 				 pf->hw.func_caps.fd_filters_best_effort,
10254 				 pf->hw.func_caps.num_tx_qp,
10255 				 pf->hw.func_caps.num_vsis);
10256 		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10257 			dev_info(&pf->pdev->dev,
10258 				 "switch_mode=0x%04x, function_valid=0x%08x\n",
10259 				 pf->hw.dev_caps.switch_mode,
10260 				 pf->hw.dev_caps.valid_functions);
10261 			dev_info(&pf->pdev->dev,
10262 				 "SR-IOV=%d, num_vfs for all function=%u\n",
10263 				 pf->hw.dev_caps.sr_iov_1_1,
10264 				 pf->hw.dev_caps.num_vfs);
10265 			dev_info(&pf->pdev->dev,
10266 				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10267 				 pf->hw.dev_caps.num_vsis,
10268 				 pf->hw.dev_caps.num_rx_qp,
10269 				 pf->hw.dev_caps.num_tx_qp);
10270 		}
10271 	}
10272 	if (list_type == i40e_aqc_opc_list_func_capabilities) {
10273 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10274 		       + pf->hw.func_caps.num_vfs)
10275 		if (pf->hw.revision_id == 0 &&
10276 		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10277 			dev_info(&pf->pdev->dev,
10278 				 "got num_vsis %d, setting num_vsis to %d\n",
10279 				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10280 			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10281 		}
10282 	}
10283 	return 0;
10284 }
10285 
10286 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10287 
10288 /**
10289  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10290  * @pf: board private structure
10291  **/
10292 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10293 {
10294 	struct i40e_vsi *vsi;
10295 
10296 	/* quick workaround for an NVM issue that leaves a critical register
10297 	 * uninitialized
10298 	 */
10299 	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10300 		static const u32 hkey[] = {
10301 			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10302 			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10303 			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10304 			0x95b3a76d};
10305 		int i;
10306 
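		/* seed all of the I40E_GLQF_HKEY registers so the hash key is
		 * non-zero even though the NVM left it uninitialized
		 */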
10307 		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10308 			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10309 	}
10310 
10311 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
10312 		return;
10313 
10314 	/* find existing VSI and see if it needs configuring */
10315 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10316 
10317 	/* create a new VSI if none exists */
10318 	if (!vsi) {
10319 		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10320 				     pf->vsi[pf->lan_vsi]->seid, 0);
10321 		if (!vsi) {
10322 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10323 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10324 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10325 			return;
10326 		}
10327 	}
10328 
10329 	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10330 }
10331 
10332 /**
10333  * i40e_fdir_teardown - release the Flow Director resources
10334  * @pf: board private structure
10335  **/
10336 static void i40e_fdir_teardown(struct i40e_pf *pf)
10337 {
10338 	struct i40e_vsi *vsi;
10339 
10340 	i40e_fdir_filter_exit(pf);
10341 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10342 	if (vsi)
10343 		i40e_vsi_release(vsi);
10344 }
10345 
10346 /**
10347  * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10348  * @vsi: PF main vsi
10349  * @seid: seid of main or channel VSIs
10350  *
10351  * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10352  * existed before reset
10353  **/
10354 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10355 {
10356 	struct i40e_cloud_filter *cfilter;
10357 	struct i40e_pf *pf = vsi->back;
10358 	struct hlist_node *node;
10359 	i40e_status ret;
10360 
10361 	/* Add cloud filters back if they exist */
10362 	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10363 				  cloud_node) {
10364 		if (cfilter->seid != seid)
10365 			continue;
10366 
10367 		if (cfilter->dst_port)
10368 			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10369 								true);
10370 		else
10371 			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10372 
10373 		if (ret) {
10374 			dev_dbg(&pf->pdev->dev,
10375 				"Failed to rebuild cloud filter, err %s aq_err %s\n",
10376 				i40e_stat_str(&pf->hw, ret),
10377 				i40e_aq_str(&pf->hw,
10378 					    pf->hw.aq.asq_last_status));
10379 			return ret;
10380 		}
10381 	}
10382 	return 0;
10383 }
10384 
10385 /**
10386  * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10387  * @vsi: PF main vsi
10388  *
10389  * Rebuilds channel VSIs if they existed before reset
10390  **/
10391 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10392 {
10393 	struct i40e_channel *ch, *ch_tmp;
10394 	i40e_status ret;
10395 
10396 	if (list_empty(&vsi->ch_list))
10397 		return 0;
10398 
10399 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10400 		if (!ch->initialized)
10401 			break;
10402 		/* Proceed with creation of channel (VMDq2) VSI */
10403 		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10404 		if (ret) {
10405 			dev_info(&vsi->back->pdev->dev,
10406 				 "failed to rebuild channels using uplink_seid %u\n",
10407 				 vsi->uplink_seid);
10408 			return ret;
10409 		}
10410 		/* Reconfigure TX queues using QTX_CTL register */
10411 		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10412 		if (ret) {
10413 			dev_info(&vsi->back->pdev->dev,
10414 				 "failed to configure TX rings for channel %u\n",
10415 				 ch->seid);
10416 			return ret;
10417 		}
10418 		/* update 'next_base_queue' */
10419 		vsi->next_base_queue = vsi->next_base_queue +
10420 							ch->num_queue_pairs;
10421 		if (ch->max_tx_rate) {
10422 			u64 credits = ch->max_tx_rate;
10423 
10424 			if (i40e_set_bw_limit(vsi, ch->seid,
10425 					      ch->max_tx_rate))
10426 				return -EINVAL;
10427 
10428 			do_div(credits, I40E_BW_CREDIT_DIVISOR);
10429 			dev_dbg(&vsi->back->pdev->dev,
10430 				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10431 				ch->max_tx_rate,
10432 				credits,
10433 				ch->seid);
10434 		}
10435 		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
10436 		if (ret) {
10437 			dev_dbg(&vsi->back->pdev->dev,
10438 				"Failed to rebuild cloud filters for channel VSI %u\n",
10439 				ch->seid);
10440 			return ret;
10441 		}
10442 	}
10443 	return 0;
10444 }
10445 
10446 /**
10447  * i40e_prep_for_reset - prep for the core to reset
10448  * @pf: board private structure
10449  *
10450  * Close up the VFs and other things in prep for PF Reset.
10451  **/
10452 static void i40e_prep_for_reset(struct i40e_pf *pf)
10453 {
10454 	struct i40e_hw *hw = &pf->hw;
10455 	i40e_status ret = 0;
10456 	u32 v;
10457 
10458 	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
10459 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10460 		return;
10461 	if (i40e_check_asq_alive(&pf->hw))
10462 		i40e_vc_notify_reset(pf);
10463 
10464 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
10465 
10466 	/* quiesce the VSIs and their queues that are not already DOWN */
10467 	i40e_pf_quiesce_all_vsi(pf);
10468 
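	/* clear the stale SEIDs; they get reassigned when the switch elements
	 * are rebuilt after the reset
	 */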
10469 	for (v = 0; v < pf->num_alloc_vsi; v++) {
10470 		if (pf->vsi[v])
10471 			pf->vsi[v]->seid = 0;
10472 	}
10473 
10474 	i40e_shutdown_adminq(&pf->hw);
10475 
10476 	/* call shutdown HMC */
10477 	if (hw->hmc.hmc_obj) {
10478 		ret = i40e_shutdown_lan_hmc(hw);
10479 		if (ret)
10480 			dev_warn(&pf->pdev->dev,
10481 				 "shutdown_lan_hmc failed: %d\n", ret);
10482 	}
10483 
10484 	/* Save the current PTP time so that we can restore the time after the
10485 	 * reset completes.
10486 	 */
10487 	i40e_ptp_save_hw_time(pf);
10488 }
10489 
10490 /**
10491  * i40e_send_version - update firmware with driver version
10492  * @pf: PF struct
10493  */
10494 static void i40e_send_version(struct i40e_pf *pf)
10495 {
10496 	struct i40e_driver_version dv;
10497 
10498 	dv.major_version = 0xff;
10499 	dv.minor_version = 0xff;
10500 	dv.build_version = 0xff;
10501 	dv.subbuild_version = 0;
10502 	strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
10503 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
10504 }
10505 
10506 /**
10507  * i40e_get_oem_version - get OEM specific version information
10508  * @hw: pointer to the hardware structure
10509  **/
10510 static void i40e_get_oem_version(struct i40e_hw *hw)
10511 {
10512 	u16 block_offset = 0xffff;
10513 	u16 block_length = 0;
10514 	u16 capabilities = 0;
10515 	u16 gen_snap = 0;
10516 	u16 release = 0;
10517 
10518 #define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
10519 #define I40E_NVM_OEM_LENGTH_OFFSET		0x00
10520 #define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
10521 #define I40E_NVM_OEM_GEN_OFFSET			0x02
10522 #define I40E_NVM_OEM_RELEASE_OFFSET		0x03
10523 #define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
10524 #define I40E_NVM_OEM_LENGTH			3
10525 
10526 	/* Check if pointer to OEM version block is valid. */
10527 	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
10528 	if (block_offset == 0xffff)
10529 		return;
10530 
10531 	/* Check if OEM version block has correct length. */
10532 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
10533 			   &block_length);
10534 	if (block_length < I40E_NVM_OEM_LENGTH)
10535 		return;
10536 
10537 	/* Check if OEM version format is as expected. */
10538 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
10539 			   &capabilities);
10540 	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
10541 		return;
10542 
10543 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
10544 			   &gen_snap);
10545 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
10546 			   &release);
10547 	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
10548 	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
10549 }
10550 
10551 /**
10552  * i40e_reset - wait for a core reset to finish, or reset the PF if no CORER was seen
10553  * @pf: board private structure
10554  **/
10555 static int i40e_reset(struct i40e_pf *pf)
10556 {
10557 	struct i40e_hw *hw = &pf->hw;
10558 	i40e_status ret;
10559 
10560 	ret = i40e_pf_reset(hw);
10561 	if (ret) {
10562 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
10563 		set_bit(__I40E_RESET_FAILED, pf->state);
10564 		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10565 	} else {
10566 		pf->pfr_count++;
10567 	}
10568 	return ret;
10569 }
10570 
10571 /**
10572  * i40e_rebuild - rebuild using a saved config
10573  * @pf: board private structure
10574  * @reinit: if the Main VSI needs to be re-initialized.
10575  * @lock_acquired: indicates whether or not the lock has been acquired
10576  * before this function was called.
10577  **/
10578 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10579 {
10580 	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
10581 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10582 	struct i40e_hw *hw = &pf->hw;
10583 	i40e_status ret;
10584 	u32 val;
10585 	int v;
10586 
10587 	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10588 	    i40e_check_recovery_mode(pf)) {
10589 		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10590 	}
10591 
10592 	if (test_bit(__I40E_DOWN, pf->state) &&
10593 	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
10594 	    !old_recovery_mode_bit)
10595 		goto clear_recovery;
10596 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10597 
10598 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
10599 	ret = i40e_init_adminq(&pf->hw);
10600 	if (ret) {
10601 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
10602 			 i40e_stat_str(&pf->hw, ret),
10603 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10604 		goto clear_recovery;
10605 	}
10606 	i40e_get_oem_version(&pf->hw);
10607 
10608 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
10609 		/* The following delay is necessary for firmware update. */
10610 		mdelay(1000);
10611 	}
10612 
10613 	/* re-verify the eeprom if we just had an EMP reset */
10614 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10615 		i40e_verify_eeprom(pf);
10616 
10617 	/* if we are going out of or into recovery mode we have to act
10618 	 * accordingly with regard to resource initialization
10619 	 * and deinitialization
10620 	 */
10621 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
10622 	    old_recovery_mode_bit) {
10623 		if (i40e_get_capabilities(pf,
10624 					  i40e_aqc_opc_list_func_capabilities))
10625 			goto end_unlock;
10626 
10627 		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10628 			/* we're staying in recovery mode so we'll reinitialize
10629 			 * misc vector here
10630 			 */
10631 			if (i40e_setup_misc_vector_for_recovery_mode(pf))
10632 				goto end_unlock;
10633 		} else {
10634 			if (!lock_acquired)
10635 				rtnl_lock();
10636 			/* we're going out of recovery mode so we'll free
10637 			 * the IRQ allocated specifically for recovery mode
10638 			 * and restore the interrupt scheme
10639 			 */
10640 			free_irq(pf->pdev->irq, pf);
10641 			i40e_clear_interrupt_scheme(pf);
10642 			if (i40e_restore_interrupt_scheme(pf))
10643 				goto end_unlock;
10644 		}
10645 
10646 		/* tell the firmware that we're starting */
10647 		i40e_send_version(pf);
10648 
10649 		/* bail out in case recovery mode was detected, as there is
10650 		 * no need for further configuration.
10651 		 */
10652 		goto end_unlock;
10653 	}
10654 
10655 	i40e_clear_pxe_mode(hw);
10656 	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10657 	if (ret)
10658 		goto end_core_reset;
10659 
10660 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10661 				hw->func_caps.num_rx_qp, 0, 0);
10662 	if (ret) {
10663 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10664 		goto end_core_reset;
10665 	}
10666 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10667 	if (ret) {
10668 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10669 		goto end_core_reset;
10670 	}
10671 
10672 #ifdef CONFIG_I40E_DCB
10673 	/* Enable FW to write a default DCB config on link-up
10674 	 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
10675 	 * is not supported with new link speed
10676 	 */
10677 	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
10678 		i40e_aq_set_dcb_parameters(hw, false, NULL);
10679 	} else {
10680 		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
10681 		    (hw->phy.link_info.link_speed &
10682 		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
10683 			i40e_aq_set_dcb_parameters(hw, false, NULL);
10684 			dev_warn(&pf->pdev->dev,
10685 				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
10686 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10687 		} else {
10688 			i40e_aq_set_dcb_parameters(hw, true, NULL);
10689 			ret = i40e_init_pf_dcb(pf);
10690 			if (ret) {
10691 				dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
10692 					 ret);
10693 				pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10694 				/* Continue without DCB enabled */
10695 			}
10696 		}
10697 	}
10698 
10699 #endif /* CONFIG_I40E_DCB */
10700 	if (!lock_acquired)
10701 		rtnl_lock();
10702 	ret = i40e_setup_pf_switch(pf, reinit, true);
10703 	if (ret)
10704 		goto end_unlock;
10705 
10706 	/* The driver only wants link up/down and module qualification
10707 	 * reports from firmware.  Note the negative logic.
10708 	 */
10709 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
10710 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
10711 					 I40E_AQ_EVENT_MEDIA_NA |
10712 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10713 	if (ret)
10714 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10715 			 i40e_stat_str(&pf->hw, ret),
10716 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10717 
10718 	/* Rebuild the VSIs and VEBs that existed before reset.
10719 	 * They are still in our local switch element arrays, so only
10720 	 * need to rebuild the switch model in the HW.
10721 	 *
10722 	 * If there were VEBs but the reconstitution failed, we'll try
10723 	 * to recover minimal use by getting the basic PF VSI working.
10724 	 */
10725 	if (vsi->uplink_seid != pf->mac_seid) {
10726 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10727 		/* find the one VEB connected to the MAC, and find orphans */
10728 		for (v = 0; v < I40E_MAX_VEB; v++) {
10729 			if (!pf->veb[v])
10730 				continue;
10731 
10732 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10733 			    pf->veb[v]->uplink_seid == 0) {
10734 				ret = i40e_reconstitute_veb(pf->veb[v]);
10735 
10736 				if (!ret)
10737 					continue;
10738 
10739 				/* If Main VEB failed, we're in deep doodoo,
10740 				 * so give up rebuilding the switch and set up
10741 				 * for minimal rebuild of PF VSI.
10742 				 * If orphan failed, we'll report the error
10743 				 * but try to keep going.
10744 				 */
10745 				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10746 					dev_info(&pf->pdev->dev,
10747 						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10748 						 ret);
10749 					vsi->uplink_seid = pf->mac_seid;
10750 					break;
10751 				} else if (pf->veb[v]->uplink_seid == 0) {
10752 					dev_info(&pf->pdev->dev,
10753 						 "rebuild of orphan VEB failed: %d\n",
10754 						 ret);
10755 				}
10756 			}
10757 		}
10758 	}
10759 
10760 	if (vsi->uplink_seid == pf->mac_seid) {
10761 		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10762 		/* no VEB, so rebuild only the Main VSI */
10763 		ret = i40e_add_vsi(vsi);
10764 		if (ret) {
10765 			dev_info(&pf->pdev->dev,
10766 				 "rebuild of Main VSI failed: %d\n", ret);
10767 			goto end_unlock;
10768 		}
10769 	}
10770 
10771 	if (vsi->mqprio_qopt.max_rate[0]) {
10772 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10773 		u64 credits = 0;
10774 
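		/* scale the stored rate down to Mbps (I40E_BW_MBPS_DIVISOR)
		 * before handing it to the scheduler as a limit
		 */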
10775 		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10776 		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10777 		if (ret)
10778 			goto end_unlock;
10779 
10780 		credits = max_tx_rate;
10781 		do_div(credits, I40E_BW_CREDIT_DIVISOR);
10782 		dev_dbg(&vsi->back->pdev->dev,
10783 			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10784 			max_tx_rate,
10785 			credits,
10786 			vsi->seid);
10787 	}
10788 
10789 	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10790 	if (ret)
10791 		goto end_unlock;
10792 
10793 	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
10794 	 * for this main VSI if they exist
10795 	 */
10796 	ret = i40e_rebuild_channels(vsi);
10797 	if (ret)
10798 		goto end_unlock;
10799 
10800 	/* Reconfigure hardware for allowing smaller MSS in the case
10801 	 * of TSO, so that we avoid the MDD being fired and causing
10802 	 * a reset in the case of small MSS+TSO.
10803 	 */
10804 #define I40E_REG_MSS          0x000E64DC
10805 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
10806 #define I40E_64BYTE_MSS       0x400000
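	/* the minimum MSS lives in bits 25:16; if it is currently above
	 * 64 bytes, clamp it down to 64
	 */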
10807 	val = rd32(hw, I40E_REG_MSS);
10808 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10809 		val &= ~I40E_REG_MSS_MIN_MASK;
10810 		val |= I40E_64BYTE_MSS;
10811 		wr32(hw, I40E_REG_MSS, val);
10812 	}
10813 
10814 	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10815 		msleep(75);
10816 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10817 		if (ret)
10818 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10819 				 i40e_stat_str(&pf->hw, ret),
10820 				 i40e_aq_str(&pf->hw,
10821 					     pf->hw.aq.asq_last_status));
10822 	}
10823 	/* reinit the misc interrupt */
10824 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10825 		ret = i40e_setup_misc_vector(pf);
10826 
10827 	/* Add a filter to drop all Flow control frames from any VSI from being
10828 	 * transmitted. By doing so we stop a malicious VF from sending out
10829 	 * PAUSE or PFC frames and potentially controlling traffic for other
10830 	 * PF/VF VSIs.
10831 	 * The FW can still send Flow control frames if enabled.
10832 	 */
10833 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10834 						       pf->main_vsi_seid);
10835 
10836 	/* restart the VSIs that were rebuilt and running before the reset */
10837 	i40e_pf_unquiesce_all_vsi(pf);
10838 
10839 	/* Release the RTNL lock before we start resetting VFs */
10840 	if (!lock_acquired)
10841 		rtnl_unlock();
10842 
10843 	/* Restore promiscuous settings */
10844 	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10845 	if (ret)
10846 		dev_warn(&pf->pdev->dev,
10847 			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10848 			 pf->cur_promisc ? "on" : "off",
10849 			 i40e_stat_str(&pf->hw, ret),
10850 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10851 
10852 	i40e_reset_all_vfs(pf, true);
10853 
10854 	/* tell the firmware that we're starting */
10855 	i40e_send_version(pf);
10856 
10857 	/* We've already released the lock, so don't do it again */
10858 	goto end_core_reset;
10859 
10860 end_unlock:
10861 	if (!lock_acquired)
10862 		rtnl_unlock();
10863 end_core_reset:
10864 	clear_bit(__I40E_RESET_FAILED, pf->state);
10865 clear_recovery:
10866 	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10867 	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10868 }
10869 
10870 /**
10871  * i40e_reset_and_rebuild - reset and rebuild using a saved config
10872  * @pf: board private structure
10873  * @reinit: if the Main VSI needs to be re-initialized.
10874  * @lock_acquired: indicates whether or not the lock has been acquired
10875  * before this function was called.
10876  **/
10877 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10878 				   bool lock_acquired)
10879 {
10880 	int ret;
10881 
10882 	if (test_bit(__I40E_IN_REMOVE, pf->state))
10883 		return;
10884 	/* Now we wait for GRST to settle out.
10885 	 * We don't have to delete the VEBs or VSIs from the hw switch
10886 	 * because the reset will make them disappear.
10887 	 */
10888 	ret = i40e_reset(pf);
10889 	if (!ret)
10890 		i40e_rebuild(pf, reinit, lock_acquired);
10891 }
10892 
10893 /**
10894  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10895  * @pf: board private structure
10896  * @lock_acquired: indicates whether or not the lock has been acquired
10897  * before this function was called.
10898  *
10899  * Close up the VFs and other things in prep for a Core Reset,
10900  * then get ready to rebuild the world.
10901  **/
10902 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10903 {
10904 	i40e_prep_for_reset(pf);
10905 	i40e_reset_and_rebuild(pf, false, lock_acquired);
10906 }
10907 
10908 /**
10909  * i40e_handle_mdd_event
10910  * @pf: pointer to the PF structure
10911  *
10912  * Called from the MDD irq handler to identify possibly malicious vfs
10913  **/
10914 static void i40e_handle_mdd_event(struct i40e_pf *pf)
10915 {
10916 	struct i40e_hw *hw = &pf->hw;
10917 	bool mdd_detected = false;
10918 	struct i40e_vf *vf;
10919 	u32 reg;
10920 	int i;
10921 
10922 	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10923 		return;
10924 
10925 	/* find what triggered the MDD event */
10926 	reg = rd32(hw, I40E_GL_MDET_TX);
10927 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10928 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10929 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
10930 		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10931 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
10932 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10933 				I40E_GL_MDET_TX_EVENT_SHIFT;
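		/* the MDET register reports an absolute queue number; subtract
		 * the PF's base_queue to get a PF-relative queue index
		 */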
10934 		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10935 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
10936 				pf->hw.func_caps.base_queue;
10937 		if (netif_msg_tx_err(pf))
10938 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10939 				 event, queue, pf_num, vf_num);
10940 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10941 		mdd_detected = true;
10942 	}
10943 	reg = rd32(hw, I40E_GL_MDET_RX);
10944 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10945 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10946 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
10947 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10948 				I40E_GL_MDET_RX_EVENT_SHIFT;
10949 		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10950 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
10951 				pf->hw.func_caps.base_queue;
10952 		if (netif_msg_rx_err(pf))
10953 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10954 				 event, queue, func);
10955 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10956 		mdd_detected = true;
10957 	}
10958 
10959 	if (mdd_detected) {
10960 		reg = rd32(hw, I40E_PF_MDET_TX);
10961 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10962 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10963 			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10964 		}
10965 		reg = rd32(hw, I40E_PF_MDET_RX);
10966 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10967 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10968 			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10969 		}
10970 	}
10971 
10972 	/* see if one of the VFs needs its hand slapped */
10973 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10974 		vf = &(pf->vf[i]);
10975 		reg = rd32(hw, I40E_VP_MDET_TX(i));
10976 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10977 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10978 			vf->num_mdd_events++;
10979 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10980 				 i);
10981 			dev_info(&pf->pdev->dev,
10982 				 "Use PF Control I/F to re-enable the VF\n");
10983 			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10984 		}
10985 
10986 		reg = rd32(hw, I40E_VP_MDET_RX(i));
10987 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10988 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10989 			vf->num_mdd_events++;
10990 			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10991 				 i);
10992 			dev_info(&pf->pdev->dev,
10993 				 "Use PF Control I/F to re-enable the VF\n");
10994 			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10995 		}
10996 	}
10997 
10998 	/* re-enable mdd interrupt cause */
10999 	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
11000 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
11001 	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
11002 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
11003 	i40e_flush(hw);
11004 }
11005 
11006 /**
11007  * i40e_service_task - Run the driver's async subtasks
11008  * @work: pointer to work_struct containing our data
11009  **/
11010 static void i40e_service_task(struct work_struct *work)
11011 {
11012 	struct i40e_pf *pf = container_of(work,
11013 					  struct i40e_pf,
11014 					  service_task);
11015 	unsigned long start_time = jiffies;
11016 
11017 	/* don't bother with service tasks if a reset is in progress */
11018 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
11019 	    test_bit(__I40E_SUSPENDED, pf->state))
11020 		return;
11021 
11022 	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
11023 		return;
11024 
11025 	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
11026 		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
11027 		i40e_sync_filters_subtask(pf);
11028 		i40e_reset_subtask(pf);
11029 		i40e_handle_mdd_event(pf);
11030 		i40e_vc_process_vflr_event(pf);
11031 		i40e_watchdog_subtask(pf);
11032 		i40e_fdir_reinit_subtask(pf);
11033 		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
11034 			/* Client subtask will reopen next time through. */
11035 			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
11036 							   true);
11037 		} else {
11038 			i40e_client_subtask(pf);
11039 			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
11040 					       pf->state))
11041 				i40e_notify_client_of_l2_param_changes(
11042 								pf->vsi[pf->lan_vsi]);
11043 		}
11044 		i40e_sync_filters_subtask(pf);
11045 	} else {
11046 		i40e_reset_subtask(pf);
11047 	}
11048 
11049 	i40e_clean_adminq_subtask(pf);
11050 
11051 	/* flush memory to make sure state is correct before next watchdog */
11052 	smp_mb__before_atomic();
11053 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
11054 
11055 	/* If the tasks have taken longer than one timer cycle or there
11056 	 * is more work to be done, reschedule the service task now
11057 	 * rather than wait for the timer to tick again.
11058 	 */
11059 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
11060 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state)		 ||
11061 	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state)		 ||
11062 	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
11063 		i40e_service_event_schedule(pf);
11064 }
11065 
11066 /**
11067  * i40e_service_timer - timer callback
11068  * @t: timer list pointer
11069  **/
11070 static void i40e_service_timer(struct timer_list *t)
11071 {
11072 	struct i40e_pf *pf = from_timer(pf, t, service_timer);
11073 
11074 	mod_timer(&pf->service_timer,
11075 		  round_jiffies(jiffies + pf->service_timer_period));
11076 	i40e_service_event_schedule(pf);
11077 }
11078 
11079 /**
11080  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
11081  * @vsi: the VSI being configured
11082  **/
11083 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
11084 {
11085 	struct i40e_pf *pf = vsi->back;
11086 
11087 	switch (vsi->type) {
11088 	case I40E_VSI_MAIN:
11089 		vsi->alloc_queue_pairs = pf->num_lan_qps;
11090 		if (!vsi->num_tx_desc)
11091 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11092 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11093 		if (!vsi->num_rx_desc)
11094 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11095 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11096 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11097 			vsi->num_q_vectors = pf->num_lan_msix;
11098 		else
11099 			vsi->num_q_vectors = 1;
11100 
11101 		break;
11102 
11103 	case I40E_VSI_FDIR:
11104 		vsi->alloc_queue_pairs = 1;
11105 		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11106 					 I40E_REQ_DESCRIPTOR_MULTIPLE);
11107 		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11108 					 I40E_REQ_DESCRIPTOR_MULTIPLE);
11109 		vsi->num_q_vectors = pf->num_fdsb_msix;
11110 		break;
11111 
11112 	case I40E_VSI_VMDQ2:
11113 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
11114 		if (!vsi->num_tx_desc)
11115 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11116 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11117 		if (!vsi->num_rx_desc)
11118 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11119 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11120 		vsi->num_q_vectors = pf->num_vmdq_msix;
11121 		break;
11122 
11123 	case I40E_VSI_SRIOV:
11124 		vsi->alloc_queue_pairs = pf->num_vf_qps;
11125 		if (!vsi->num_tx_desc)
11126 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11127 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11128 		if (!vsi->num_rx_desc)
11129 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11130 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11131 		break;
11132 
11133 	default:
11134 		WARN_ON(1);
11135 		return -ENODATA;
11136 	}
11137 
11138 	if (is_kdump_kernel()) {
11139 		vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
11140 		vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
11141 	}
11142 
11143 	return 0;
11144 }
11145 
11146 /**
11147  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11148  * @vsi: VSI pointer
11149  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
11150  *
11151  * On error: returns error code (negative)
11152  * On success: returns 0
11153  **/
11154 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
11155 {
11156 	struct i40e_ring **next_rings;
11157 	int size;
11158 	int ret = 0;
11159 
11160 	/* allocate memory for the Tx, XDP Tx and Rx ring pointers */
11161 	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
11162 	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
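	/* a single allocation holds every ring pointer, laid out as
	 * tx_rings[0..n-1], then xdp_rings[0..n-1] (XDP only), then
	 * rx_rings[0..n-1]
	 */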
11163 	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
11164 	if (!vsi->tx_rings)
11165 		return -ENOMEM;
11166 	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
11167 	if (i40e_enabled_xdp_vsi(vsi)) {
11168 		vsi->xdp_rings = next_rings;
11169 		next_rings += vsi->alloc_queue_pairs;
11170 	}
11171 	vsi->rx_rings = next_rings;
11172 
11173 	if (alloc_qvectors) {
11174 		/* allocate memory for q_vector pointers */
11175 		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
11176 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
11177 		if (!vsi->q_vectors) {
11178 			ret = -ENOMEM;
11179 			goto err_vectors;
11180 		}
11181 	}
11182 	return ret;
11183 
11184 err_vectors:
11185 	kfree(vsi->tx_rings);
11186 	return ret;
11187 }
11188 
11189 /**
11190  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11191  * @pf: board private structure
11192  * @type: type of VSI
11193  *
11194  * On error: returns error code (negative)
11195  * On success: returns vsi index in PF (positive)
11196  **/
11197 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
11198 {
11199 	int ret = -ENODEV;
11200 	struct i40e_vsi *vsi;
11201 	int vsi_idx;
11202 	int i;
11203 
11204 	/* Need to protect the allocation of the VSIs at the PF level */
11205 	mutex_lock(&pf->switch_mutex);
11206 
11207 	/* VSI list may be fragmented if VSI creation/destruction has
11208 	 * been happening.  We can afford to do a quick scan to look
11209 	 * for any free VSIs in the list.
11210 	 *
11211 	 * find next empty vsi slot, looping back around if necessary
11212 	 */
11213 	i = pf->next_vsi;
11214 	while (i < pf->num_alloc_vsi && pf->vsi[i])
11215 		i++;
11216 	if (i >= pf->num_alloc_vsi) {
11217 		i = 0;
11218 		while (i < pf->next_vsi && pf->vsi[i])
11219 			i++;
11220 	}
11221 
11222 	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
11223 		vsi_idx = i;             /* Found one! */
11224 	} else {
11225 		ret = -ENODEV;
11226 		goto unlock_pf;  /* out of VSI slots! */
11227 	}
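	/* start the next search in the slot just past the one we claimed */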
11228 	pf->next_vsi = ++i;
11229 
11230 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
11231 	if (!vsi) {
11232 		ret = -ENOMEM;
11233 		goto unlock_pf;
11234 	}
11235 	vsi->type = type;
11236 	vsi->back = pf;
11237 	set_bit(__I40E_VSI_DOWN, vsi->state);
11238 	vsi->flags = 0;
11239 	vsi->idx = vsi_idx;
11240 	vsi->int_rate_limit = 0;
11241 	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
11242 				pf->rss_table_size : 64;
11243 	vsi->netdev_registered = false;
11244 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
11245 	hash_init(vsi->mac_filter_hash);
11246 	vsi->irqs_ready = false;
11247 
11248 	if (type == I40E_VSI_MAIN) {
11249 		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
11250 		if (!vsi->af_xdp_zc_qps)
11251 			goto err_rings;
11252 	}
11253 
11254 	ret = i40e_set_num_rings_in_vsi(vsi);
11255 	if (ret)
11256 		goto err_rings;
11257 
11258 	ret = i40e_vsi_alloc_arrays(vsi, true);
11259 	if (ret)
11260 		goto err_rings;
11261 
11262 	/* Setup default MSIX irq handler for VSI */
11263 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
11264 
11265 	/* Initialize VSI lock */
11266 	spin_lock_init(&vsi->mac_filter_hash_lock);
11267 	pf->vsi[vsi_idx] = vsi;
11268 	ret = vsi_idx;
11269 	goto unlock_pf;
11270 
11271 err_rings:
11272 	bitmap_free(vsi->af_xdp_zc_qps);
11273 	pf->next_vsi = i - 1;
11274 	kfree(vsi);
11275 unlock_pf:
11276 	mutex_unlock(&pf->switch_mutex);
11277 	return ret;
11278 }
11279 
11280 /**
11281  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
11282  * @vsi: VSI pointer
11283  * @free_qvectors: a bool to specify if q_vectors need to be freed.
11287  **/
11288 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
11289 {
11290 	/* free the ring and vector containers */
11291 	if (free_qvectors) {
11292 		kfree(vsi->q_vectors);
11293 		vsi->q_vectors = NULL;
11294 	}
11295 	kfree(vsi->tx_rings);
11296 	vsi->tx_rings = NULL;
11297 	vsi->rx_rings = NULL;
11298 	vsi->xdp_rings = NULL;
11299 }
11300 
11301 /**
11302  * i40e_clear_rss_config_user - clear the user configured RSS hash keys
11303  * and lookup table
11304  * @vsi: Pointer to VSI structure
11305  */
11306 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
11307 {
11308 	if (!vsi)
11309 		return;
11310 
11311 	kfree(vsi->rss_hkey_user);
11312 	vsi->rss_hkey_user = NULL;
11313 
11314 	kfree(vsi->rss_lut_user);
11315 	vsi->rss_lut_user = NULL;
11316 }
11317 
11318 /**
11319  * i40e_vsi_clear - Deallocate the VSI provided
11320  * @vsi: the VSI being un-configured
11321  **/
11322 static int i40e_vsi_clear(struct i40e_vsi *vsi)
11323 {
11324 	struct i40e_pf *pf;
11325 
11326 	if (!vsi)
11327 		return 0;
11328 
11329 	if (!vsi->back)
11330 		goto free_vsi;
11331 	pf = vsi->back;
11332 
11333 	mutex_lock(&pf->switch_mutex);
11334 	if (!pf->vsi[vsi->idx]) {
11335 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
11336 			vsi->idx, vsi->idx, vsi->type);
11337 		goto unlock_vsi;
11338 	}
11339 
11340 	if (pf->vsi[vsi->idx] != vsi) {
11341 		dev_err(&pf->pdev->dev,
11342 			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
11343 			pf->vsi[vsi->idx]->idx,
11344 			pf->vsi[vsi->idx]->type,
11345 			vsi->idx, vsi->type);
11346 		goto unlock_vsi;
11347 	}
11348 
11349 	/* updates the PF for this cleared vsi */
11350 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
11351 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
11352 
11353 	bitmap_free(vsi->af_xdp_zc_qps);
11354 	i40e_vsi_free_arrays(vsi, true);
11355 	i40e_clear_rss_config_user(vsi);
11356 
11357 	pf->vsi[vsi->idx] = NULL;
11358 	if (vsi->idx < pf->next_vsi)
11359 		pf->next_vsi = vsi->idx;
11360 
11361 unlock_vsi:
11362 	mutex_unlock(&pf->switch_mutex);
11363 free_vsi:
11364 	kfree(vsi);
11365 
11366 	return 0;
11367 }
11368 
11369 /**
11370  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
11371  * @vsi: the VSI being cleaned
11372  **/
11373 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
11374 {
11375 	int i;
11376 
11377 	if (vsi->tx_rings && vsi->tx_rings[0]) {
11378 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11379 			kfree_rcu(vsi->tx_rings[i], rcu);
11380 			WRITE_ONCE(vsi->tx_rings[i], NULL);
11381 			WRITE_ONCE(vsi->rx_rings[i], NULL);
11382 			if (vsi->xdp_rings)
11383 				WRITE_ONCE(vsi->xdp_rings[i], NULL);
11384 		}
11385 	}
11386 }
11387 
11388 /**
11389  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11390  * @vsi: the VSI being configured
11391  **/
11392 static int i40e_alloc_rings(struct i40e_vsi *vsi)
11393 {
11394 	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
11395 	struct i40e_pf *pf = vsi->back;
11396 	struct i40e_ring *ring;
11397 
11398 	/* Set basic values in the rings to be used later during open() */
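	/* each queue pair gets one kcalloc() of qpv ring structs: the Tx ring,
	 * the XDP Tx ring when XDP is enabled, then the Rx ring; ring++ below
	 * steps through that block
	 */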
11399 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11400 		/* allocate space for the Tx, XDP Tx and Rx rings in one shot */
11401 		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
11402 		if (!ring)
11403 			goto err_out;
11404 
11405 		ring->queue_index = i;
11406 		ring->reg_idx = vsi->base_queue + i;
11407 		ring->ring_active = false;
11408 		ring->vsi = vsi;
11409 		ring->netdev = vsi->netdev;
11410 		ring->dev = &pf->pdev->dev;
11411 		ring->count = vsi->num_tx_desc;
11412 		ring->size = 0;
11413 		ring->dcb_tc = 0;
11414 		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11415 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11416 		ring->itr_setting = pf->tx_itr_default;
11417 		WRITE_ONCE(vsi->tx_rings[i], ring++);
11418 
11419 		if (!i40e_enabled_xdp_vsi(vsi))
11420 			goto setup_rx;
11421 
11422 		ring->queue_index = vsi->alloc_queue_pairs + i;
11423 		ring->reg_idx = vsi->base_queue + ring->queue_index;
11424 		ring->ring_active = false;
11425 		ring->vsi = vsi;
11426 		ring->netdev = NULL;
11427 		ring->dev = &pf->pdev->dev;
11428 		ring->count = vsi->num_tx_desc;
11429 		ring->size = 0;
11430 		ring->dcb_tc = 0;
11431 		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11432 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11433 		set_ring_xdp(ring);
11434 		ring->itr_setting = pf->tx_itr_default;
11435 		WRITE_ONCE(vsi->xdp_rings[i], ring++);
11436 
11437 setup_rx:
11438 		ring->queue_index = i;
11439 		ring->reg_idx = vsi->base_queue + i;
11440 		ring->ring_active = false;
11441 		ring->vsi = vsi;
11442 		ring->netdev = vsi->netdev;
11443 		ring->dev = &pf->pdev->dev;
11444 		ring->count = vsi->num_rx_desc;
11445 		ring->size = 0;
11446 		ring->dcb_tc = 0;
11447 		ring->itr_setting = pf->rx_itr_default;
11448 		WRITE_ONCE(vsi->rx_rings[i], ring);
11449 	}
11450 
11451 	return 0;
11452 
11453 err_out:
11454 	i40e_vsi_clear_rings(vsi);
11455 	return -ENOMEM;
11456 }
11457 
11458 /**
11459  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11460  * @pf: board private structure
11461  * @vectors: the number of MSI-X vectors to request
11462  *
11463  * Returns the number of vectors reserved, or 0 if the reservation failed
11464  **/
11465 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
11466 {
11467 	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
11468 					I40E_MIN_MSIX, vectors);
11469 	if (vectors < 0) {
11470 		dev_info(&pf->pdev->dev,
11471 			 "MSI-X vector reservation failed: %d\n", vectors);
11472 		vectors = 0;
11473 	}
11474 
11475 	return vectors;
11476 }
11477 
11478 /**
11479  * i40e_init_msix - Setup the MSIX capability
11480  * @pf: board private structure
11481  *
11482  * Work with the OS to set up the MSIX vectors needed.
11483  *
11484  * Returns the number of vectors reserved or negative on failure
11485  **/
11486 static int i40e_init_msix(struct i40e_pf *pf)
11487 {
11488 	struct i40e_hw *hw = &pf->hw;
11489 	int cpus, extra_vectors;
11490 	int vectors_left;
11491 	int v_budget, i;
11492 	int v_actual;
11493 	int iwarp_requested = 0;
11494 
11495 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
11496 		return -ENODEV;
11497 
11498 	/* The number of vectors we'll request will be comprised of:
11499 	 *   - Add 1 for "other" cause for Admin Queue events, etc.
11500 	 *   - The number of LAN queue pairs
11501 	 *	- Queues being used for RSS.
11502 	 *		We don't need as many as max_rss_size vectors;
11503 	 *		use rss_size instead in the calculation since that
11504 	 *		is governed by the number of CPUs in the system.
11505 	 *	- assumes symmetric Tx/Rx pairing
11506 	 *   - The number of VMDq pairs
11507 	 *   - The CPU count within the NUMA node if iWARP is enabled
11508 	 * Once we count this up, try the request.
11509 	 *
11510 	 * If we can't get what we want, we'll simplify to nearly nothing
11511 	 * and try again.  If that still fails, we punt.
11512 	 */
11513 	vectors_left = hw->func_caps.num_msix_vectors;
11514 	v_budget = 0;
11515 
11516 	/* reserve one vector for miscellaneous handler */
11517 	if (vectors_left) {
11518 		v_budget++;
11519 		vectors_left--;
11520 	}
11521 
11522 	/* reserve some vectors for the main PF traffic queues. Initially we
11523 	 * only reserve at most 50% of the available vectors, in the case that
11524 	 * the number of online CPUs is large. This ensures that we can enable
11525 	 * extra features as well. Once we've enabled the other features, we
11526 	 * will use any remaining vectors to reach as close as we can to the
11527 	 * number of online CPUs.
11528 	 */
11529 	cpus = num_online_cpus();
11530 	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11531 	vectors_left -= pf->num_lan_msix;
11532 
11533 	/* reserve one vector for sideband flow director */
11534 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11535 		if (vectors_left) {
11536 			pf->num_fdsb_msix = 1;
11537 			v_budget++;
11538 			vectors_left--;
11539 		} else {
11540 			pf->num_fdsb_msix = 0;
11541 		}
11542 	}
11543 
11544 	/* can we reserve enough for iWARP? */
11545 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11546 		iwarp_requested = pf->num_iwarp_msix;
11547 
11548 		if (!vectors_left)
11549 			pf->num_iwarp_msix = 0;
11550 		else if (vectors_left < pf->num_iwarp_msix)
11551 			pf->num_iwarp_msix = 1;
11552 		v_budget += pf->num_iwarp_msix;
11553 		vectors_left -= pf->num_iwarp_msix;
11554 	}
11555 
11556 	/* any vectors left over go for VMDq support */
11557 	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
11558 		if (!vectors_left) {
11559 			pf->num_vmdq_msix = 0;
11560 			pf->num_vmdq_qps = 0;
11561 		} else {
11562 			int vmdq_vecs_wanted =
11563 				pf->num_vmdq_vsis * pf->num_vmdq_qps;
11564 			int vmdq_vecs =
11565 				min_t(int, vectors_left, vmdq_vecs_wanted);
11566 
11567 			/* if we're short on vectors for what's desired, we limit
11568 			 * the queues per vmdq.  If this is still more than are
11569 			 * available, the user will need to change the number of
11570 			 * queues/vectors used by the PF later with the ethtool
11571 			 * channels command
11572 			 */
11573 			if (vectors_left < vmdq_vecs_wanted) {
11574 				pf->num_vmdq_qps = 1;
11575 				vmdq_vecs_wanted = pf->num_vmdq_vsis;
11576 				vmdq_vecs = min_t(int,
11577 						  vectors_left,
11578 						  vmdq_vecs_wanted);
11579 			}
11580 			pf->num_vmdq_msix = pf->num_vmdq_qps;
11581 
11582 			v_budget += vmdq_vecs;
11583 			vectors_left -= vmdq_vecs;
11584 		}
11585 	}
11586 
11587 	/* On systems with a large number of SMP cores, we previously limited
11588 	 * the number of vectors for num_lan_msix to be at most 50% of the
11589 	 * available vectors, to allow for other features. Now, we add back
11590 	 * the remaining vectors. However, we ensure that the total
11591 	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11592 	 * calculate the number of vectors we can add without going over the
11593 	 * cap of CPUs. For systems with a small number of CPUs this will be
11594 	 * zero.
11595 	 */
11596 	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11597 	pf->num_lan_msix += extra_vectors;
11598 	vectors_left -= extra_vectors;
11599 
11600 	WARN(vectors_left < 0,
11601 	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11602 
11603 	v_budget += pf->num_lan_msix;
11604 	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11605 				   GFP_KERNEL);
11606 	if (!pf->msix_entries)
11607 		return -ENOMEM;
11608 
11609 	for (i = 0; i < v_budget; i++)
11610 		pf->msix_entries[i].entry = i;
11611 	v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11612 
11613 	if (v_actual < I40E_MIN_MSIX) {
11614 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11615 		kfree(pf->msix_entries);
11616 		pf->msix_entries = NULL;
11617 		pci_disable_msix(pf->pdev);
11618 		return -ENODEV;
11619 
11620 	} else if (v_actual == I40E_MIN_MSIX) {
11621 		/* Adjust for minimal MSIX use */
11622 		pf->num_vmdq_vsis = 0;
11623 		pf->num_vmdq_qps = 0;
11624 		pf->num_lan_qps = 1;
11625 		pf->num_lan_msix = 1;
11626 
11627 	} else if (v_actual != v_budget) {
11628 		/* If we have limited resources, we will start with no vectors
11629 		 * for the special features and then allocate vectors to some
11630 		 * of these features based on the policy and at the end disable
11631 		 * the features that did not get any vectors.
11632 		 */
11633 		int vec;
11634 
11635 		dev_info(&pf->pdev->dev,
11636 			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11637 			 v_actual, v_budget);
11638 		/* reserve the misc vector */
11639 		vec = v_actual - 1;
11640 
11641 		/* Scale vector usage down */
11642 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
11643 		pf->num_vmdq_vsis = 1;
11644 		pf->num_vmdq_qps = 1;
11645 
11646 		/* partition out the remaining vectors */
11647 		switch (vec) {
11648 		case 2:
11649 			pf->num_lan_msix = 1;
11650 			break;
11651 		case 3:
11652 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11653 				pf->num_lan_msix = 1;
11654 				pf->num_iwarp_msix = 1;
11655 			} else {
11656 				pf->num_lan_msix = 2;
11657 			}
11658 			break;
11659 		default:
11660 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11661 				pf->num_iwarp_msix = min_t(int, (vec / 3),
11662 						 iwarp_requested);
11663 				pf->num_vmdq_vsis = min_t(int, (vec / 3),
11664 						  I40E_DEFAULT_NUM_VMDQ_VSI);
11665 			} else {
11666 				pf->num_vmdq_vsis = min_t(int, (vec / 2),
11667 						  I40E_DEFAULT_NUM_VMDQ_VSI);
11668 			}
11669 			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11670 				pf->num_fdsb_msix = 1;
11671 				vec--;
11672 			}
11673 			pf->num_lan_msix = min_t(int,
11674 			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11675 							      pf->num_lan_msix);
11676 			pf->num_lan_qps = pf->num_lan_msix;
11677 			break;
11678 		}
11679 	}
11680 
11681 	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11682 	    (pf->num_fdsb_msix == 0)) {
11683 		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11684 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11685 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11686 	}
11687 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11688 	    (pf->num_vmdq_msix == 0)) {
11689 		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11690 		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11691 	}
11692 
11693 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11694 	    (pf->num_iwarp_msix == 0)) {
11695 		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11696 		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11697 	}
11698 	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11699 		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11700 		   pf->num_lan_msix,
11701 		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
11702 		   pf->num_fdsb_msix,
11703 		   pf->num_iwarp_msix);
11704 
11705 	return v_actual;
11706 }
11707 
11708 /**
11709  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11710  * @vsi: the VSI being configured
11711  * @v_idx: index of the vector in the vsi struct
11712  *
11713  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
11714  **/
11715 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
11716 {
11717 	struct i40e_q_vector *q_vector;
11718 
11719 	/* allocate q_vector */
11720 	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11721 	if (!q_vector)
11722 		return -ENOMEM;
11723 
11724 	q_vector->vsi = vsi;
11725 	q_vector->v_idx = v_idx;
11726 	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11727 
11728 	if (vsi->netdev)
11729 		netif_napi_add(vsi->netdev, &q_vector->napi,
11730 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
11731 
11732 	/* tie q_vector and vsi together */
11733 	vsi->q_vectors[v_idx] = q_vector;
11734 
11735 	return 0;
11736 }
11737 
11738 /**
11739  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11740  * @vsi: the VSI being configured
11741  *
11742  * We allocate one q_vector per queue interrupt.  If allocation fails we
11743  * return -ENOMEM.
11744  **/
11745 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11746 {
11747 	struct i40e_pf *pf = vsi->back;
11748 	int err, v_idx, num_q_vectors;
11749 
11750 	/* if not MSIX, give the one vector only to the LAN VSI */
11751 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11752 		num_q_vectors = vsi->num_q_vectors;
11753 	else if (vsi == pf->vsi[pf->lan_vsi])
11754 		num_q_vectors = 1;
11755 	else
11756 		return -EINVAL;
11757 
11758 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11759 		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
11760 		if (err)
11761 			goto err_out;
11762 	}
11763 
11764 	return 0;
11765 
11766 err_out:
11767 	while (v_idx--)
11768 		i40e_free_q_vector(vsi, v_idx);
11769 
11770 	return err;
11771 }
11772 
11773 /**
11774  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11775  * @pf: board private structure to initialize
11776  **/
11777 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11778 {
11779 	int vectors = 0;
11780 	ssize_t size;
11781 
11782 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11783 		vectors = i40e_init_msix(pf);
11784 		if (vectors < 0) {
11785 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
11786 				       I40E_FLAG_IWARP_ENABLED	|
11787 				       I40E_FLAG_RSS_ENABLED	|
11788 				       I40E_FLAG_DCB_CAPABLE	|
11789 				       I40E_FLAG_DCB_ENABLED	|
11790 				       I40E_FLAG_SRIOV_ENABLED	|
11791 				       I40E_FLAG_FD_SB_ENABLED	|
11792 				       I40E_FLAG_FD_ATR_ENABLED	|
11793 				       I40E_FLAG_VMDQ_ENABLED);
11794 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11795 
11796 			/* rework the queue expectations without MSIX */
11797 			i40e_determine_queue_usage(pf);
11798 		}
11799 	}
11800 
11801 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11802 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11803 		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11804 		vectors = pci_enable_msi(pf->pdev);
11805 		if (vectors < 0) {
11806 			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11807 				 vectors);
11808 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11809 		}
11810 		vectors = 1;  /* one MSI or Legacy vector */
11811 	}
11812 
11813 	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11814 		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11815 
11816 	/* set up vector assignment tracking */
11817 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11818 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
11819 	if (!pf->irq_pile)
11820 		return -ENOMEM;
11821 
11822 	pf->irq_pile->num_entries = vectors;
11823 
11824 	/* track first vector for misc interrupts, ignore return */
11825 	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11826 
11827 	return 0;
11828 }
11829 
11830 /**
11831  * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11832  * @pf: private board data structure
11833  *
11834  * Restore the interrupt scheme that was cleared when we suspended the
11835  * device. This should be called during resume to re-allocate the q_vectors
11836  * and reacquire IRQs.
11837  */
11838 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11839 {
11840 	int err, i;
11841 
11842 	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
11843 	 * scheme. We need to re-enable them here in order to attempt to
11844 	 * re-acquire the MSI or MSI-X vectors
11845 	 */
11846 	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11847 
11848 	err = i40e_init_interrupt_scheme(pf);
11849 	if (err)
11850 		return err;
11851 
11852 	/* Now that we've re-acquired IRQs, we need to remap the vectors and
11853 	 * rings together again.
11854 	 */
11855 	for (i = 0; i < pf->num_alloc_vsi; i++) {
11856 		if (pf->vsi[i]) {
11857 			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11858 			if (err)
11859 				goto err_unwind;
11860 			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11861 		}
11862 	}
11863 
11864 	err = i40e_setup_misc_vector(pf);
11865 	if (err)
11866 		goto err_unwind;
11867 
11868 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11869 		i40e_client_update_msix_info(pf);
11870 
11871 	return 0;
11872 
11873 err_unwind:
11874 	while (i--) {
11875 		if (pf->vsi[i])
11876 			i40e_vsi_free_q_vectors(pf->vsi[i]);
11877 	}
11878 
11879 	return err;
11880 }
11881 
11882 /**
11883  * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11884  * non queue events in recovery mode
11885  * @pf: board private structure
11886  *
11887  * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11888  * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11889  * This is handled differently from the normal (non-recovery) path since no
11890  * Tx/Rx resources are being allocated.
11891  **/
11892 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11893 {
11894 	int err;
11895 
11896 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11897 		err = i40e_setup_misc_vector(pf);
11898 
11899 		if (err) {
11900 			dev_info(&pf->pdev->dev,
11901 				 "MSI-X misc vector request failed, error %d\n",
11902 				 err);
11903 			return err;
11904 		}
11905 	} else {
11906 		u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11907 
11908 		err = request_irq(pf->pdev->irq, i40e_intr, flags,
11909 				  pf->int_name, pf);
11910 
11911 		if (err) {
11912 			dev_info(&pf->pdev->dev,
11913 				 "MSI/legacy misc vector request failed, error %d\n",
11914 				 err);
11915 			return err;
11916 		}
11917 		i40e_enable_misc_int_causes(pf);
11918 		i40e_irq_dynamic_enable_icr0(pf);
11919 	}
11920 
11921 	return 0;
11922 }
11923 
11924 /**
11925  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11926  * @pf: board private structure
11927  *
11928  * This sets up the handler for MSIX 0, which is used to manage the
11929  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
11930  * when in MSI or Legacy interrupt mode.
11931  **/
11932 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11933 {
11934 	struct i40e_hw *hw = &pf->hw;
11935 	int err = 0;
11936 
11937 	/* Only request the IRQ once, the first time through. */
11938 	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11939 		err = request_irq(pf->msix_entries[0].vector,
11940 				  i40e_intr, 0, pf->int_name, pf);
11941 		if (err) {
11942 			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11943 			dev_info(&pf->pdev->dev,
11944 				 "request_irq for %s failed: %d\n",
11945 				 pf->int_name, err);
11946 			return -EFAULT;
11947 		}
11948 	}
11949 
11950 	i40e_enable_misc_int_causes(pf);
11951 
11952 	/* associate no queues to the misc vector */
11953 	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11954 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
11955 
11956 	i40e_flush(hw);
11957 
11958 	i40e_irq_dynamic_enable_icr0(pf);
11959 
11960 	return err;
11961 }
11962 
11963 /**
11964  * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11965  * @vsi: Pointer to vsi structure
11966  * @seed: Buffer to store the hash keys
11967  * @lut: Buffer to store the lookup table entries
11968  * @lut_size: Size of buffer to store the lookup table entries
11969  *
11970  * Return 0 on success, negative on failure
11971  */
11972 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11973 			   u8 *lut, u16 lut_size)
11974 {
11975 	struct i40e_pf *pf = vsi->back;
11976 	struct i40e_hw *hw = &pf->hw;
11977 	int ret = 0;
11978 
11979 	if (seed) {
11980 		ret = i40e_aq_get_rss_key(hw, vsi->id,
11981 			(struct i40e_aqc_get_set_rss_key_data *)seed);
11982 		if (ret) {
11983 			dev_info(&pf->pdev->dev,
11984 				 "Cannot get RSS key, err %s aq_err %s\n",
11985 				 i40e_stat_str(&pf->hw, ret),
11986 				 i40e_aq_str(&pf->hw,
11987 					     pf->hw.aq.asq_last_status));
11988 			return ret;
11989 		}
11990 	}
11991 
11992 	if (lut) {
11993 		bool pf_lut = vsi->type == I40E_VSI_MAIN;
11994 
11995 		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11996 		if (ret) {
11997 			dev_info(&pf->pdev->dev,
11998 				 "Cannot get RSS lut, err %s aq_err %s\n",
11999 				 i40e_stat_str(&pf->hw, ret),
12000 				 i40e_aq_str(&pf->hw,
12001 					     pf->hw.aq.asq_last_status));
12002 			return ret;
12003 		}
12004 	}
12005 
12006 	return ret;
12007 }
12008 
12009 /**
12010  * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
12011  * @vsi: Pointer to vsi structure
12012  * @seed: RSS hash seed
12013  * @lut: Lookup table
12014  * @lut_size: Lookup table size
12015  *
12016  * Returns 0 on success, negative on failure
12017  **/
12018 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
12019 			       const u8 *lut, u16 lut_size)
12020 {
12021 	struct i40e_pf *pf = vsi->back;
12022 	struct i40e_hw *hw = &pf->hw;
12023 	u16 vf_id = vsi->vf_id;
12024 	u8 i;
12025 
12026 	/* Fill out hash function seed */
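	/* The seed is consumed as 32-bit words; callers in this file provide
	 * an I40E_HKEY_ARRAY_SIZE byte buffer (see i40e_pf_config_rss).
	 */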
12027 	if (seed) {
12028 		u32 *seed_dw = (u32 *)seed;
12029 
12030 		if (vsi->type == I40E_VSI_MAIN) {
12031 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12032 				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
12033 		} else if (vsi->type == I40E_VSI_SRIOV) {
12034 			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
12035 				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
12036 		} else {
12037 			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
12038 		}
12039 	}
12040 
12041 	if (lut) {
12042 		u32 *lut_dw = (u32 *)lut;
12043 
12044 		if (vsi->type == I40E_VSI_MAIN) {
12045 			if (lut_size != I40E_HLUT_ARRAY_SIZE)
12046 				return -EINVAL;
12047 			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12048 				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
12049 		} else if (vsi->type == I40E_VSI_SRIOV) {
12050 			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
12051 				return -EINVAL;
12052 			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12053 				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
12054 		} else {
12055 			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12056 		}
12057 	}
12058 	i40e_flush(hw);
12059 
12060 	return 0;
12061 }
12062 
12063 /**
12064  * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
12065  * @vsi: Pointer to VSI structure
12066  * @seed: Buffer to store the keys
12067  * @lut: Buffer to store the lookup table entries
12068  * @lut_size: Size of buffer to store the lookup table entries
12069  *
12070  * Returns 0 on success, negative on failure
12071  */
12072 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
12073 			    u8 *lut, u16 lut_size)
12074 {
12075 	struct i40e_pf *pf = vsi->back;
12076 	struct i40e_hw *hw = &pf->hw;
12077 	u16 i;
12078 
12079 	if (seed) {
12080 		u32 *seed_dw = (u32 *)seed;
12081 
12082 		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12083 			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
12084 	}
12085 	if (lut) {
12086 		u32 *lut_dw = (u32 *)lut;
12087 
12088 		if (lut_size != I40E_HLUT_ARRAY_SIZE)
12089 			return -EINVAL;
12090 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12091 			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
12092 	}
12093 
12094 	return 0;
12095 }
12096 
12097 /**
12098  * i40e_config_rss - Configure RSS keys and lut
12099  * @vsi: Pointer to VSI structure
12100  * @seed: RSS hash seed
12101  * @lut: Lookup table
12102  * @lut_size: Lookup table size
12103  *
12104  * Returns 0 on success, negative on failure
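 *
 * Parts that advertise I40E_HW_RSS_AQ_CAPABLE (set for X722 in i40e_sw_init)
 * are configured through AdminQ commands; other parts are programmed by
 * writing the RSS registers directly.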
12105  */
12106 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12107 {
12108 	struct i40e_pf *pf = vsi->back;
12109 
12110 	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12111 		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
12112 	else
12113 		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
12114 }
12115 
12116 /**
12117  * i40e_get_rss - Get RSS keys and lut
12118  * @vsi: Pointer to VSI structure
12119  * @seed: Buffer to store the keys
12120  * @lut: Buffer to store the lookup table entries
12121  * @lut_size: Size of buffer to store the lookup table entries
12122  *
12123  * Returns 0 on success, negative on failure
12124  */
12125 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12126 {
12127 	struct i40e_pf *pf = vsi->back;
12128 
12129 	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12130 		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
12131 	else
12132 		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
12133 }
12134 
12135 /**
12136  * i40e_fill_rss_lut - Fill the RSS lookup table with default values
12137  * @pf: Pointer to board private structure
12138  * @lut: Lookup table
12139  * @rss_table_size: Lookup table size
 * @rss_size: Number of queues to spread the hash over
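 *
 * Entries are assigned round-robin over the first rss_size queues; for
 * example, rss_table_size = 8 with rss_size = 3 yields 0 1 2 0 1 2 0 1.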
12141  */
12142 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
12143 		       u16 rss_table_size, u16 rss_size)
12144 {
12145 	u16 i;
12146 
12147 	for (i = 0; i < rss_table_size; i++)
12148 		lut[i] = i % rss_size;
12149 }
12150 
12151 /**
12152  * i40e_pf_config_rss - Prepare for RSS if used
12153  * @pf: board private structure
12154  **/
12155 static int i40e_pf_config_rss(struct i40e_pf *pf)
12156 {
12157 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12158 	u8 seed[I40E_HKEY_ARRAY_SIZE];
12159 	u8 *lut;
12160 	struct i40e_hw *hw = &pf->hw;
12161 	u32 reg_val;
12162 	u64 hena;
12163 	int ret;
12164 
12165 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
12166 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12167 		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12168 	hena |= i40e_pf_get_default_rss_hena(pf);
12169 
12170 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12171 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
12172 
12173 	/* Determine the RSS table size based on the hardware capabilities */
12174 	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12175 	reg_val = (pf->rss_table_size == 512) ?
12176 			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12177 			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12178 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12179 
12180 	/* Determine the RSS size of the VSI */
12181 	if (!vsi->rss_size) {
12182 		u16 qcount;
12183 		/* If the firmware does something weird during VSI init, we
12184 		 * could end up with zero TCs. Check for that to avoid
12185 		 * divide-by-zero. It probably won't pass traffic, but it also
12186 		 * won't panic.
12187 		 */
12188 		qcount = vsi->num_queue_pairs /
12189 			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
12190 		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12191 	}
12192 	if (!vsi->rss_size)
12193 		return -EINVAL;
12194 
12195 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
12196 	if (!lut)
12197 		return -ENOMEM;
12198 
12199 	/* Use user configured lut if there is one, otherwise use default */
12200 	if (vsi->rss_lut_user)
12201 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
12202 	else
12203 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
12204 
12205 	/* Use user configured hash key if there is one, otherwise
12206 	 * use default.
12207 	 */
12208 	if (vsi->rss_hkey_user)
12209 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
12210 	else
12211 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
12212 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
12213 	kfree(lut);
12214 
12215 	return ret;
12216 }
12217 
12218 /**
12219  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12220  * @pf: board private structure
12221  * @queue_count: the requested queue count for rss.
12222  *
 * Returns 0 if RSS is not enabled; if it is enabled, returns the final RSS
 * queue count, which may differ from the requested queue count.
12225  * Note: expects to be called while under rtnl_lock()
12226  **/
12227 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12228 {
12229 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12230 	int new_rss_size;
12231 
12232 	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
12233 		return 0;
12234 
12235 	queue_count = min_t(int, queue_count, num_online_cpus());
12236 	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12237 
12238 	if (queue_count != vsi->num_queue_pairs) {
12239 		u16 qcount;
12240 
12241 		vsi->req_queue_pairs = queue_count;
12242 		i40e_prep_for_reset(pf);
12243 		if (test_bit(__I40E_IN_REMOVE, pf->state))
12244 			return pf->alloc_rss_size;
12245 
12246 		pf->alloc_rss_size = new_rss_size;
12247 
12248 		i40e_reset_and_rebuild(pf, true, true);
12249 
		/* Discard the user configured hash keys and lut if fewer
		 * queues are enabled.
		 */
12253 		if (queue_count < vsi->rss_size) {
12254 			i40e_clear_rss_config_user(vsi);
12255 			dev_dbg(&pf->pdev->dev,
12256 				"discard user configured hash keys and lut\n");
12257 		}
12258 
12259 		/* Reset vsi->rss_size, as number of enabled queues changed */
12260 		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12261 		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12262 
12263 		i40e_pf_config_rss(pf);
12264 	}
12265 	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count:  %d/%d\n",
12266 		 vsi->req_queue_pairs, pf->rss_size_max);
12267 	return pf->alloc_rss_size;
12268 }
12269 
12270 /**
12271  * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12272  * @pf: board private structure
12273  **/
12274 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
12275 {
12276 	i40e_status status;
12277 	bool min_valid, max_valid;
12278 	u32 max_bw, min_bw;
12279 
12280 	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
12281 					   &min_valid, &max_valid);
12282 
12283 	if (!status) {
12284 		if (min_valid)
12285 			pf->min_bw = min_bw;
12286 		if (max_valid)
12287 			pf->max_bw = max_bw;
12288 	}
12289 
12290 	return status;
12291 }
12292 
12293 /**
12294  * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12295  * @pf: board private structure
12296  **/
12297 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
12298 {
12299 	struct i40e_aqc_configure_partition_bw_data bw_data;
12300 	i40e_status status;
12301 
12302 	memset(&bw_data, 0, sizeof(bw_data));
12303 
12304 	/* Set the valid bit for this PF */
12305 	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12306 	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12307 	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
12308 
12309 	/* Set the new bandwidths */
12310 	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
12311 
12312 	return status;
12313 }
12314 
12315 /**
12316  * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12317  * @pf: board private structure
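 *
 * The temporary bandwidth values held in alternate RAM are made persistent
 * by reading NVM word 0x10 and writing it back unchanged, which triggers an
 * NVM update of the shadow (alt) RAM contents.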
12318  **/
12319 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12320 {
12321 	/* Commit temporary BW setting to permanent NVM image */
12322 	enum i40e_admin_queue_err last_aq_status;
12323 	i40e_status ret;
12324 	u16 nvm_word;
12325 
12326 	if (pf->hw.partition_id != 1) {
12327 		dev_info(&pf->pdev->dev,
12328 			 "Commit BW only works on partition 1! This is partition %d",
12329 			 pf->hw.partition_id);
12330 		ret = I40E_NOT_SUPPORTED;
12331 		goto bw_commit_out;
12332 	}
12333 
12334 	/* Acquire NVM for read access */
12335 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
12336 	last_aq_status = pf->hw.aq.asq_last_status;
12337 	if (ret) {
12338 		dev_info(&pf->pdev->dev,
12339 			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
12340 			 i40e_stat_str(&pf->hw, ret),
12341 			 i40e_aq_str(&pf->hw, last_aq_status));
12342 		goto bw_commit_out;
12343 	}
12344 
12345 	/* Read word 0x10 of NVM - SW compatibility word 1 */
12346 	ret = i40e_aq_read_nvm(&pf->hw,
12347 			       I40E_SR_NVM_CONTROL_WORD,
12348 			       0x10, sizeof(nvm_word), &nvm_word,
12349 			       false, NULL);
12350 	/* Save off last admin queue command status before releasing
12351 	 * the NVM
12352 	 */
12353 	last_aq_status = pf->hw.aq.asq_last_status;
12354 	i40e_release_nvm(&pf->hw);
12355 	if (ret) {
12356 		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
12357 			 i40e_stat_str(&pf->hw, ret),
12358 			 i40e_aq_str(&pf->hw, last_aq_status));
12359 		goto bw_commit_out;
12360 	}
12361 
12362 	/* Wait a bit for NVM release to complete */
12363 	msleep(50);
12364 
12365 	/* Acquire NVM for write access */
12366 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
12367 	last_aq_status = pf->hw.aq.asq_last_status;
12368 	if (ret) {
12369 		dev_info(&pf->pdev->dev,
12370 			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
12371 			 i40e_stat_str(&pf->hw, ret),
12372 			 i40e_aq_str(&pf->hw, last_aq_status));
12373 		goto bw_commit_out;
12374 	}
12375 	/* Write it back out unchanged to initiate update NVM,
12376 	 * which will force a write of the shadow (alt) RAM to
12377 	 * the NVM - thus storing the bandwidth values permanently.
12378 	 */
12379 	ret = i40e_aq_update_nvm(&pf->hw,
12380 				 I40E_SR_NVM_CONTROL_WORD,
12381 				 0x10, sizeof(nvm_word),
12382 				 &nvm_word, true, 0, NULL);
12383 	/* Save off last admin queue command status before releasing
12384 	 * the NVM
12385 	 */
12386 	last_aq_status = pf->hw.aq.asq_last_status;
12387 	i40e_release_nvm(&pf->hw);
12388 	if (ret)
12389 		dev_info(&pf->pdev->dev,
12390 			 "BW settings NOT SAVED, err %s aq_err %s\n",
12391 			 i40e_stat_str(&pf->hw, ret),
12392 			 i40e_aq_str(&pf->hw, last_aq_status));
12393 bw_commit_out:
12394 
12395 	return ret;
12396 }
12397 
12398 /**
 * i40e_is_total_port_shutdown_enabled - read NVM and return true if the
 * total port shutdown feature is enabled for this PF
12401  * @pf: board private structure
12402  **/
12403 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12404 {
12405 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(4)
12406 #define I40E_FEATURES_ENABLE_PTR		0x2A
12407 #define I40E_CURRENT_SETTING_PTR		0x2B
12408 #define I40E_LINK_BEHAVIOR_WORD_OFFSET		0x2D
12409 #define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
12410 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
12411 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
12412 	i40e_status read_status = I40E_SUCCESS;
12413 	u16 sr_emp_sr_settings_ptr = 0;
12414 	u16 features_enable = 0;
12415 	u16 link_behavior = 0;
12416 	bool ret = false;
12417 
12418 	read_status = i40e_read_nvm_word(&pf->hw,
12419 					 I40E_SR_EMP_SR_SETTINGS_PTR,
12420 					 &sr_emp_sr_settings_ptr);
12421 	if (read_status)
12422 		goto err_nvm;
12423 	read_status = i40e_read_nvm_word(&pf->hw,
12424 					 sr_emp_sr_settings_ptr +
12425 					 I40E_FEATURES_ENABLE_PTR,
12426 					 &features_enable);
12427 	if (read_status)
12428 		goto err_nvm;
12429 	if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12430 		read_status = i40e_read_nvm_module_data(&pf->hw,
12431 							I40E_SR_EMP_SR_SETTINGS_PTR,
12432 							I40E_CURRENT_SETTING_PTR,
12433 							I40E_LINK_BEHAVIOR_WORD_OFFSET,
12434 							I40E_LINK_BEHAVIOR_WORD_LENGTH,
12435 							&link_behavior);
12436 		if (read_status)
12437 			goto err_nvm;
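		/* Each port owns a 4-bit field in the link behavior word;
		 * shift this port's field down and test the OS-forced bit.
		 */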
12438 		link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12439 		ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
12440 	}
12441 	return ret;
12442 
12443 err_nvm:
12444 	dev_warn(&pf->pdev->dev,
12445 		 "total-port-shutdown feature is off due to read nvm error: %s\n",
12446 		 i40e_stat_str(&pf->hw, read_status));
12447 	return ret;
12448 }
12449 
12450 /**
12451  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12452  * @pf: board private structure to initialize
12453  *
12454  * i40e_sw_init initializes the Adapter private data structure.
12455  * Fields are initialized based on PCI device information and
12456  * OS network device settings (MTU size).
12457  **/
12458 static int i40e_sw_init(struct i40e_pf *pf)
12459 {
12460 	int err = 0;
12461 	int size;
12462 	u16 pow;
12463 
12464 	/* Set default capability flags */
12465 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12466 		    I40E_FLAG_MSI_ENABLED     |
12467 		    I40E_FLAG_MSIX_ENABLED;
12468 
12469 	/* Set default ITR */
12470 	pf->rx_itr_default = I40E_ITR_RX_DEF;
12471 	pf->tx_itr_default = I40E_ITR_TX_DEF;
12472 
12473 	/* Depending on PF configurations, it is possible that the RSS
12474 	 * maximum might end up larger than the available queues
12475 	 */
12476 	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12477 	pf->alloc_rss_size = 1;
12478 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12479 	pf->rss_size_max = min_t(int, pf->rss_size_max,
12480 				 pf->hw.func_caps.num_tx_qp);
12481 
	/* round the number of online CPUs up to the next power of 2 */
12483 	pow = roundup_pow_of_two(num_online_cpus());
12484 	pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
12485 
12486 	if (pf->hw.func_caps.rss) {
12487 		pf->flags |= I40E_FLAG_RSS_ENABLED;
12488 		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12489 					   num_online_cpus());
12490 	}
12491 
12492 	/* MFP mode enabled */
12493 	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12494 		pf->flags |= I40E_FLAG_MFP_ENABLED;
12495 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12496 		if (i40e_get_partition_bw_setting(pf)) {
12497 			dev_warn(&pf->pdev->dev,
12498 				 "Could not get partition bw settings\n");
12499 		} else {
12500 			dev_info(&pf->pdev->dev,
12501 				 "Partition BW Min = %8.8x, Max = %8.8x\n",
12502 				 pf->min_bw, pf->max_bw);
12503 
12504 			/* nudge the Tx scheduler */
12505 			i40e_set_partition_bw_setting(pf);
12506 		}
12507 	}
12508 
12509 	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12510 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12511 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12512 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12513 		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12514 		    pf->hw.num_partitions > 1)
12515 			dev_info(&pf->pdev->dev,
12516 				 "Flow Director Sideband mode Disabled in MFP mode\n");
12517 		else
12518 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12519 		pf->fdir_pf_filter_count =
12520 				 pf->hw.func_caps.fd_filters_guaranteed;
12521 		pf->hw.fdir_shared_filter_count =
12522 				 pf->hw.func_caps.fd_filters_best_effort;
12523 	}
12524 
12525 	if (pf->hw.mac.type == I40E_MAC_X722) {
12526 		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12527 				    I40E_HW_128_QP_RSS_CAPABLE |
12528 				    I40E_HW_ATR_EVICT_CAPABLE |
12529 				    I40E_HW_WB_ON_ITR_CAPABLE |
12530 				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12531 				    I40E_HW_NO_PCI_LINK_CHECK |
12532 				    I40E_HW_USE_SET_LLDP_MIB |
12533 				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12534 				    I40E_HW_PTP_L4_CAPABLE |
12535 				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12536 				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12537 
12538 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12539 		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12540 		    I40E_FDEVICT_PCTYPE_DEFAULT) {
12541 			dev_warn(&pf->pdev->dev,
12542 				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12543 			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12544 		}
12545 	} else if ((pf->hw.aq.api_maj_ver > 1) ||
12546 		   ((pf->hw.aq.api_maj_ver == 1) &&
12547 		    (pf->hw.aq.api_min_ver > 4))) {
12548 		/* Supported in FW API version higher than 1.4 */
12549 		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12550 	}
12551 
12552 	/* Enable HW ATR eviction if possible */
12553 	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12554 		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12555 
12556 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12557 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12558 	    (pf->hw.aq.fw_maj_ver < 4))) {
12559 		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12560 		/* No DCB support  for FW < v4.33 */
12561 		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12562 	}
12563 
12564 	/* Disable FW LLDP if FW < v4.3 */
12565 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12566 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12567 	    (pf->hw.aq.fw_maj_ver < 4)))
12568 		pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12569 
12570 	/* Use the FW Set LLDP MIB API if FW > v4.40 */
12571 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12572 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12573 	    (pf->hw.aq.fw_maj_ver >= 5)))
12574 		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12575 
12576 	/* Enable PTP L4 if FW > v6.0 */
12577 	if (pf->hw.mac.type == I40E_MAC_XL710 &&
12578 	    pf->hw.aq.fw_maj_ver >= 6)
12579 		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12580 
12581 	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12582 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12583 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12584 		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12585 	}
12586 
12587 	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12588 		pf->flags |= I40E_FLAG_IWARP_ENABLED;
12589 		/* IWARP needs one extra vector for CQP just like MISC.*/
12590 		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12591 	}
12592 	/* Stopping FW LLDP engine is supported on XL710 and X722
12593 	 * starting from FW versions determined in i40e_init_adminq.
12594 	 * Stopping the FW LLDP engine is not supported on XL710
	 * if NPAR is functioning, so unset this hw flag in this case.
12596 	 */
12597 	if (pf->hw.mac.type == I40E_MAC_XL710 &&
12598 	    pf->hw.func_caps.npar_enable &&
12599 	    (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12600 		pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12601 
12602 #ifdef CONFIG_PCI_IOV
12603 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12604 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12605 		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12606 		pf->num_req_vfs = min_t(int,
12607 					pf->hw.func_caps.num_vfs,
12608 					I40E_MAX_VF_COUNT);
12609 	}
12610 #endif /* CONFIG_PCI_IOV */
12611 	pf->eeprom_version = 0xDEAD;
12612 	pf->lan_veb = I40E_NO_VEB;
12613 	pf->lan_vsi = I40E_NO_VSI;
12614 
12615 	/* By default FW has this off for performance reasons */
12616 	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12617 
12618 	/* set up queue assignment tracking */
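	/* The tracker is a single allocation: the struct header followed by
	 * one u16 slot per Tx queue pair.
	 */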
12619 	size = sizeof(struct i40e_lump_tracking)
12620 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12621 	pf->qp_pile = kzalloc(size, GFP_KERNEL);
12622 	if (!pf->qp_pile) {
12623 		err = -ENOMEM;
12624 		goto sw_init_done;
12625 	}
12626 	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12627 
12628 	pf->tx_timeout_recovery_level = 1;
12629 
12630 	if (pf->hw.mac.type != I40E_MAC_X722 &&
12631 	    i40e_is_total_port_shutdown_enabled(pf)) {
12632 		/* Link down on close must be on when total port shutdown
12633 		 * is enabled for a given port
12634 		 */
12635 		pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12636 			      I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12637 		dev_info(&pf->pdev->dev,
12638 			 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12639 	}
12640 	mutex_init(&pf->switch_mutex);
12641 
12642 sw_init_done:
12643 	return err;
12644 }
12645 
12646 /**
12647  * i40e_set_ntuple - set the ntuple feature flag and take action
12648  * @pf: board private structure to initialize
12649  * @features: the feature set that the stack is suggesting
12650  *
12651  * returns a bool to indicate if reset needs to happen
12652  **/
12653 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12654 {
12655 	bool need_reset = false;
12656 
12657 	/* Check if Flow Director n-tuple support was enabled or disabled.  If
12658 	 * the state changed, we need to reset.
12659 	 */
12660 	if (features & NETIF_F_NTUPLE) {
12661 		/* Enable filters and mark for reset */
12662 		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12663 			need_reset = true;
		/* enable FD_SB only if there are MSI-X vectors available and
		 * no cloud filters exist
		 */
12667 		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12668 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12669 			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12670 		}
12671 	} else {
12672 		/* turn off filters, mark for reset and clear SW filter list */
12673 		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12674 			need_reset = true;
12675 			i40e_fdir_filter_exit(pf);
12676 		}
12677 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12678 		clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12679 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12680 
12681 		/* reset fd counters */
12682 		pf->fd_add_err = 0;
12683 		pf->fd_atr_cnt = 0;
12684 		/* if ATR was auto disabled it can be re-enabled. */
12685 		if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12686 			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12687 			    (I40E_DEBUG_FD & pf->hw.debug_mask))
12688 				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12689 	}
12690 	return need_reset;
12691 }
12692 
12693 /**
12694  * i40e_clear_rss_lut - clear the rx hash lookup table
12695  * @vsi: the VSI being configured
12696  **/
12697 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12698 {
12699 	struct i40e_pf *pf = vsi->back;
12700 	struct i40e_hw *hw = &pf->hw;
12701 	u16 vf_id = vsi->vf_id;
12702 	u8 i;
12703 
12704 	if (vsi->type == I40E_VSI_MAIN) {
12705 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12706 			wr32(hw, I40E_PFQF_HLUT(i), 0);
12707 	} else if (vsi->type == I40E_VSI_SRIOV) {
12708 		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12709 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12710 	} else {
12711 		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12712 	}
12713 }
12714 
12715 /**
12716  * i40e_set_features - set the netdev feature flags
12717  * @netdev: ptr to the netdev being adjusted
12718  * @features: the feature set that the stack is suggesting
12719  * Note: expects to be called while under rtnl_lock()
12720  **/
12721 static int i40e_set_features(struct net_device *netdev,
12722 			     netdev_features_t features)
12723 {
12724 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12725 	struct i40e_vsi *vsi = np->vsi;
12726 	struct i40e_pf *pf = vsi->back;
12727 	bool need_reset;
12728 
12729 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12730 		i40e_pf_config_rss(pf);
12731 	else if (!(features & NETIF_F_RXHASH) &&
12732 		 netdev->features & NETIF_F_RXHASH)
12733 		i40e_clear_rss_lut(vsi);
12734 
12735 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
12736 		i40e_vlan_stripping_enable(vsi);
12737 	else
12738 		i40e_vlan_stripping_disable(vsi);
12739 
12740 	if (!(features & NETIF_F_HW_TC) &&
12741 	    (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12742 		dev_err(&pf->pdev->dev,
12743 			"Offloaded tc filters active, can't turn hw_tc_offload off");
12744 		return -EINVAL;
12745 	}
12746 
12747 	if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12748 		i40e_del_all_macvlans(vsi);
12749 
12750 	need_reset = i40e_set_ntuple(pf, features);
12751 
12752 	if (need_reset)
12753 		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12754 
12755 	return 0;
12756 }
12757 
12758 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12759 				    unsigned int table, unsigned int idx,
12760 				    struct udp_tunnel_info *ti)
12761 {
12762 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12763 	struct i40e_hw *hw = &np->vsi->back->hw;
12764 	u8 type, filter_index;
12765 	i40e_status ret;
12766 
12767 	type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12768 						   I40E_AQC_TUNNEL_TYPE_NGE;
12769 
12770 	ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12771 				     NULL);
12772 	if (ret) {
12773 		netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12774 			    i40e_stat_str(hw, ret),
12775 			    i40e_aq_str(hw, hw->aq.asq_last_status));
12776 		return -EIO;
12777 	}
12778 
12779 	udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
12780 	return 0;
12781 }
12782 
12783 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12784 				      unsigned int table, unsigned int idx,
12785 				      struct udp_tunnel_info *ti)
12786 {
12787 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12788 	struct i40e_hw *hw = &np->vsi->back->hw;
12789 	i40e_status ret;
12790 
12791 	ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12792 	if (ret) {
12793 		netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12794 			    i40e_stat_str(hw, ret),
12795 			    i40e_aq_str(hw, hw->aq.asq_last_status));
12796 		return -EIO;
12797 	}
12798 
12799 	return 0;
12800 }
12801 
12802 static int i40e_get_phys_port_id(struct net_device *netdev,
12803 				 struct netdev_phys_item_id *ppid)
12804 {
12805 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12806 	struct i40e_pf *pf = np->vsi->back;
12807 	struct i40e_hw *hw = &pf->hw;
12808 
12809 	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12810 		return -EOPNOTSUPP;
12811 
12812 	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12813 	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12814 
12815 	return 0;
12816 }
12817 
12818 /**
12819  * i40e_ndo_fdb_add - add an entry to the hardware database
12820  * @ndm: the input from the stack
12821  * @tb: pointer to array of nladdr (unused)
12822  * @dev: the net device pointer
12823  * @addr: the MAC address entry being added
12824  * @vid: VLAN ID
12825  * @flags: instructions from stack about fdb operation
12826  * @extack: netlink extended ack, unused currently
12827  */
12828 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12829 			    struct net_device *dev,
12830 			    const unsigned char *addr, u16 vid,
12831 			    u16 flags,
12832 			    struct netlink_ext_ack *extack)
12833 {
12834 	struct i40e_netdev_priv *np = netdev_priv(dev);
12835 	struct i40e_pf *pf = np->vsi->back;
12836 	int err = 0;
12837 
12838 	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12839 		return -EOPNOTSUPP;
12840 
12841 	if (vid) {
12842 		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12843 		return -EINVAL;
12844 	}
12845 
	/* Hardware does not support aging addresses, so if an
	 * ndm_state is given, only allow permanent addresses
	 */
12849 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12850 		netdev_info(dev, "FDB only supports static addresses\n");
12851 		return -EINVAL;
12852 	}
12853 
12854 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12855 		err = dev_uc_add_excl(dev, addr);
12856 	else if (is_multicast_ether_addr(addr))
12857 		err = dev_mc_add_excl(dev, addr);
12858 	else
12859 		err = -EINVAL;
12860 
12861 	/* Only return duplicate errors if NLM_F_EXCL is set */
12862 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
12863 		err = 0;
12864 
12865 	return err;
12866 }
12867 
12868 /**
12869  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12870  * @dev: the netdev being configured
12871  * @nlh: RTNL message
12872  * @flags: bridge flags
12873  * @extack: netlink extended ack
12874  *
 * Inserts a new hardware bridge if one has not already been created and
 * enables the bridging mode requested (VEB or VEPA). If the hardware
 * bridge has already been inserted and the request is to change the mode,
 * a PF reset is required to rebuild the components with the requested
 * hardware bridge mode enabled.
12881  *
12882  * Note: expects to be called while under rtnl_lock()
12883  **/
12884 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12885 				   struct nlmsghdr *nlh,
12886 				   u16 flags,
12887 				   struct netlink_ext_ack *extack)
12888 {
12889 	struct i40e_netdev_priv *np = netdev_priv(dev);
12890 	struct i40e_vsi *vsi = np->vsi;
12891 	struct i40e_pf *pf = vsi->back;
12892 	struct i40e_veb *veb = NULL;
12893 	struct nlattr *attr, *br_spec;
12894 	int i, rem;
12895 
12896 	/* Only for PF VSI for now */
12897 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12898 		return -EOPNOTSUPP;
12899 
12900 	/* Find the HW bridge for PF VSI */
12901 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12902 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12903 			veb = pf->veb[i];
12904 	}
12905 
12906 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12907 
12908 	nla_for_each_nested(attr, br_spec, rem) {
12909 		__u16 mode;
12910 
12911 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
12912 			continue;
12913 
12914 		mode = nla_get_u16(attr);
12915 		if ((mode != BRIDGE_MODE_VEPA) &&
12916 		    (mode != BRIDGE_MODE_VEB))
12917 			return -EINVAL;
12918 
12919 		/* Insert a new HW bridge */
12920 		if (!veb) {
12921 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12922 					     vsi->tc_config.enabled_tc);
12923 			if (veb) {
12924 				veb->bridge_mode = mode;
12925 				i40e_config_bridge_mode(veb);
12926 			} else {
12927 				/* No Bridge HW offload available */
12928 				return -ENOENT;
12929 			}
12930 			break;
12931 		} else if (mode != veb->bridge_mode) {
12932 			/* Existing HW bridge but different mode needs reset */
12933 			veb->bridge_mode = mode;
12934 			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
12935 			if (mode == BRIDGE_MODE_VEB)
12936 				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12937 			else
12938 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12939 			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12940 			break;
12941 		}
12942 	}
12943 
12944 	return 0;
12945 }
12946 
12947 /**
12948  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12949  * @skb: skb buff
12950  * @pid: process id
12951  * @seq: RTNL message seq #
12952  * @dev: the netdev being configured
12953  * @filter_mask: unused
12954  * @nlflags: netlink flags passed in
12955  *
 * Returns the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
12958  **/
12959 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12960 				   struct net_device *dev,
12961 				   u32 __always_unused filter_mask,
12962 				   int nlflags)
12963 {
12964 	struct i40e_netdev_priv *np = netdev_priv(dev);
12965 	struct i40e_vsi *vsi = np->vsi;
12966 	struct i40e_pf *pf = vsi->back;
12967 	struct i40e_veb *veb = NULL;
12968 	int i;
12969 
12970 	/* Only for PF VSI for now */
12971 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12972 		return -EOPNOTSUPP;
12973 
12974 	/* Find the HW bridge for the PF VSI */
12975 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12976 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12977 			veb = pf->veb[i];
12978 	}
12979 
12980 	if (!veb)
12981 		return 0;
12982 
12983 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12984 				       0, 0, nlflags, filter_mask, NULL);
12985 }
12986 
12987 /**
12988  * i40e_features_check - Validate encapsulated packet conforms to limits
12989  * @skb: skb buff
12990  * @dev: This physical port's netdev
12991  * @features: Offload features that the stack believes apply
12992  **/
12993 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12994 					     struct net_device *dev,
12995 					     netdev_features_t features)
12996 {
12997 	size_t len;
12998 
12999 	/* No point in doing any of this if neither checksum nor GSO are
13000 	 * being requested for this frame.  We can rule out both by just
13001 	 * checking for CHECKSUM_PARTIAL
13002 	 */
13003 	if (skb->ip_summed != CHECKSUM_PARTIAL)
13004 		return features;
13005 
13006 	/* We cannot support GSO if the MSS is going to be less than
13007 	 * 64 bytes.  If it is then we need to drop support for GSO.
13008 	 */
13009 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
13010 		features &= ~NETIF_F_GSO_MASK;
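
	/* The header length checks below use a mask trick: len & ~(N * unit)
	 * is non-zero when len is not a multiple of the unit size or exceeds
	 * N units, matching the word/dword granular hardware length fields.
	 */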
13011 
13012 	/* MACLEN can support at most 63 words */
13013 	len = skb_network_header(skb) - skb->data;
13014 	if (len & ~(63 * 2))
13015 		goto out_err;
13016 
13017 	/* IPLEN and EIPLEN can support at most 127 dwords */
13018 	len = skb_transport_header(skb) - skb_network_header(skb);
13019 	if (len & ~(127 * 4))
13020 		goto out_err;
13021 
13022 	if (skb->encapsulation) {
13023 		/* L4TUNLEN can support 127 words */
13024 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
13025 		if (len & ~(127 * 2))
13026 			goto out_err;
13027 
13028 		/* IPLEN can support at most 127 dwords */
13029 		len = skb_inner_transport_header(skb) -
13030 		      skb_inner_network_header(skb);
13031 		if (len & ~(127 * 4))
13032 			goto out_err;
13033 	}
13034 
13035 	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
13037 	 * by TCP, which is at most 15 dwords
13038 	 */
13039 
13040 	return features;
13041 out_err:
13042 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13043 }
13044 
13045 /**
13046  * i40e_xdp_setup - add/remove an XDP program
13047  * @vsi: VSI to changed
13048  * @prog: XDP program
13049  * @extack: netlink extended ack
13050  **/
13051 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13052 			  struct netlink_ext_ack *extack)
13053 {
13054 	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
13055 	struct i40e_pf *pf = vsi->back;
13056 	struct bpf_prog *old_prog;
13057 	bool need_reset;
13058 	int i;
13059 
13060 	/* Don't allow frames that span over multiple buffers */
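	/* For example, a 1500 byte MTU yields 1500 + 14 (ETH_HLEN) +
	 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
	 */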
13061 	if (frame_size > vsi->rx_buf_len) {
13062 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
13063 		return -EINVAL;
13064 	}
13065 
13066 	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
13067 	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
13068 
13069 	if (need_reset)
13070 		i40e_prep_for_reset(pf);
13071 
13072 	/* VSI shall be deleted in a moment, just return EINVAL */
13073 	if (test_bit(__I40E_IN_REMOVE, pf->state))
13074 		return -EINVAL;
13075 
13076 	old_prog = xchg(&vsi->xdp_prog, prog);
13077 
13078 	if (need_reset) {
13079 		if (!prog)
13080 			/* Wait until ndo_xsk_wakeup completes. */
13081 			synchronize_rcu();
13082 		i40e_reset_and_rebuild(pf, true, true);
13083 	}
13084 
13085 	for (i = 0; i < vsi->num_queue_pairs; i++)
13086 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
13087 
13088 	if (old_prog)
13089 		bpf_prog_put(old_prog);
13090 
13091 	/* Kick start the NAPI context if there is an AF_XDP socket open
	 * on that queue id. This is so that receiving will start.
13093 	 */
13094 	if (need_reset && prog)
13095 		for (i = 0; i < vsi->num_queue_pairs; i++)
13096 			if (vsi->xdp_rings[i]->xsk_pool)
13097 				(void)i40e_xsk_wakeup(vsi->netdev, i,
13098 						      XDP_WAKEUP_RX);
13099 
13100 	return 0;
13101 }
13102 
13103 /**
13104  * i40e_enter_busy_conf - Enters busy config state
13105  * @vsi: vsi
13106  *
13107  * Returns 0 on success, <0 for failure.
13108  **/
13109 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
13110 {
13111 	struct i40e_pf *pf = vsi->back;
13112 	int timeout = 50;
13113 
13114 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13115 		timeout--;
13116 		if (!timeout)
13117 			return -EBUSY;
13118 		usleep_range(1000, 2000);
13119 	}
13120 
13121 	return 0;
13122 }
13123 
13124 /**
13125  * i40e_exit_busy_conf - Exits busy config state
13126  * @vsi: vsi
13127  **/
13128 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13129 {
13130 	struct i40e_pf *pf = vsi->back;
13131 
13132 	clear_bit(__I40E_CONFIG_BUSY, pf->state);
13133 }
13134 
13135 /**
13136  * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13137  * @vsi: vsi
13138  * @queue_pair: queue pair
13139  **/
13140 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13141 {
13142 	memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13143 	       sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13144 	memset(&vsi->tx_rings[queue_pair]->stats, 0,
13145 	       sizeof(vsi->tx_rings[queue_pair]->stats));
13146 	if (i40e_enabled_xdp_vsi(vsi)) {
13147 		memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13148 		       sizeof(vsi->xdp_rings[queue_pair]->stats));
13149 	}
13150 }
13151 
13152 /**
13153  * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13154  * @vsi: vsi
13155  * @queue_pair: queue pair
13156  **/
13157 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13158 {
13159 	i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13160 	if (i40e_enabled_xdp_vsi(vsi)) {
13161 		/* Make sure that in-progress ndo_xdp_xmit calls are
13162 		 * completed.
13163 		 */
13164 		synchronize_rcu();
13165 		i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13166 	}
13167 	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13168 }
13169 
13170 /**
13171  * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13172  * @vsi: vsi
13173  * @queue_pair: queue pair
13174  * @enable: true for enable, false for disable
13175  **/
13176 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13177 					bool enable)
13178 {
13179 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13180 	struct i40e_q_vector *q_vector = rxr->q_vector;
13181 
13182 	if (!vsi->netdev)
13183 		return;
13184 
13185 	/* All rings in a qp belong to the same qvector. */
13186 	if (q_vector->rx.ring || q_vector->tx.ring) {
13187 		if (enable)
13188 			napi_enable(&q_vector->napi);
13189 		else
13190 			napi_disable(&q_vector->napi);
13191 	}
13192 }
13193 
13194 /**
13195  * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13196  * @vsi: vsi
13197  * @queue_pair: queue pair
13198  * @enable: true for enable, false for disable
13199  *
13200  * Returns 0 on success, <0 on failure.
13201  **/
13202 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13203 					bool enable)
13204 {
13205 	struct i40e_pf *pf = vsi->back;
13206 	int pf_q, ret = 0;
13207 
13208 	pf_q = vsi->base_queue + queue_pair;
13209 	ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13210 				     false /*is xdp*/, enable);
13211 	if (ret) {
13212 		dev_info(&pf->pdev->dev,
13213 			 "VSI seid %d Tx ring %d %sable timeout\n",
13214 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13215 		return ret;
13216 	}
13217 
13218 	i40e_control_rx_q(pf, pf_q, enable);
13219 	ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13220 	if (ret) {
13221 		dev_info(&pf->pdev->dev,
13222 			 "VSI seid %d Rx ring %d %sable timeout\n",
13223 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13224 		return ret;
13225 	}
13226 
13227 	/* Due to HW errata, on Rx disable only, the register can
13228 	 * indicate done before it really is. Needs 50ms to be sure
13229 	 */
13230 	if (!enable)
13231 		mdelay(50);
13232 
13233 	if (!i40e_enabled_xdp_vsi(vsi))
13234 		return ret;
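
	/* The XDP Tx rings are mapped after the regular Tx rings in the PF
	 * queue space, hence the alloc_queue_pairs offset below.
	 */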
13235 
13236 	ret = i40e_control_wait_tx_q(vsi->seid, pf,
13237 				     pf_q + vsi->alloc_queue_pairs,
13238 				     true /*is xdp*/, enable);
13239 	if (ret) {
13240 		dev_info(&pf->pdev->dev,
13241 			 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13242 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13243 	}
13244 
13245 	return ret;
13246 }
13247 
13248 /**
13249  * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13250  * @vsi: vsi
13251  * @queue_pair: queue_pair
13252  **/
13253 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13254 {
13255 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13256 	struct i40e_pf *pf = vsi->back;
13257 	struct i40e_hw *hw = &pf->hw;
13258 
13259 	/* All rings in a qp belong to the same qvector. */
13260 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13261 		i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13262 	else
13263 		i40e_irq_dynamic_enable_icr0(pf);
13264 
13265 	i40e_flush(hw);
13266 }
13267 
13268 /**
13269  * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13270  * @vsi: vsi
13271  * @queue_pair: queue_pair
13272  **/
13273 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13274 {
13275 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13276 	struct i40e_pf *pf = vsi->back;
13277 	struct i40e_hw *hw = &pf->hw;
13278 
13279 	/* For simplicity, instead of removing the qp interrupt causes
13280 	 * from the interrupt linked list, we simply disable the interrupt, and
13281 	 * leave the list intact.
13282 	 *
13283 	 * All rings in a qp belong to the same qvector.
13284 	 */
13285 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13286 		u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13287 
13288 		wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13289 		i40e_flush(hw);
13290 		synchronize_irq(pf->msix_entries[intpf].vector);
13291 	} else {
13292 		/* Legacy and MSI mode - this stops all interrupt handling */
13293 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13294 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13295 		i40e_flush(hw);
13296 		synchronize_irq(pf->pdev->irq);
13297 	}
13298 }
13299 
13300 /**
13301  * i40e_queue_pair_disable - Disables a queue pair
13302  * @vsi: vsi
13303  * @queue_pair: queue pair
13304  *
13305  * Returns 0 on success, <0 on failure.
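 *
 * The teardown order matters: the queue pair interrupt is masked first so
 * no new work is scheduled, the rings are then stopped, NAPI is disabled,
 * and finally the rings are drained and their statistics cleared.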
13306  **/
13307 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13308 {
13309 	int err;
13310 
13311 	err = i40e_enter_busy_conf(vsi);
13312 	if (err)
13313 		return err;
13314 
13315 	i40e_queue_pair_disable_irq(vsi, queue_pair);
13316 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13317 	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13318 	i40e_queue_pair_clean_rings(vsi, queue_pair);
13319 	i40e_queue_pair_reset_stats(vsi, queue_pair);
13320 
13321 	return err;
13322 }
13323 
13324 /**
13325  * i40e_queue_pair_enable - Enables a queue pair
13326  * @vsi: vsi
13327  * @queue_pair: queue pair
13328  *
13329  * Returns 0 on success, <0 on failure.
13330  **/
13331 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13332 {
13333 	int err;
13334 
13335 	err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13336 	if (err)
13337 		return err;
13338 
13339 	if (i40e_enabled_xdp_vsi(vsi)) {
13340 		err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13341 		if (err)
13342 			return err;
13343 	}
13344 
13345 	err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13346 	if (err)
13347 		return err;
13348 
13349 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13350 	i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13351 	i40e_queue_pair_enable_irq(vsi, queue_pair);
13352 
13353 	i40e_exit_busy_conf(vsi);
13354 
13355 	return err;
13356 }
13357 
13358 /**
13359  * i40e_xdp - implements ndo_bpf for i40e
13360  * @dev: netdevice
13361  * @xdp: XDP command
13362  **/
13363 static int i40e_xdp(struct net_device *dev,
13364 		    struct netdev_bpf *xdp)
13365 {
13366 	struct i40e_netdev_priv *np = netdev_priv(dev);
13367 	struct i40e_vsi *vsi = np->vsi;
13368 
13369 	if (vsi->type != I40E_VSI_MAIN)
13370 		return -EINVAL;
13371 
13372 	switch (xdp->command) {
13373 	case XDP_SETUP_PROG:
13374 		return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13375 	case XDP_SETUP_XSK_POOL:
13376 		return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13377 					   xdp->xsk.queue_id);
13378 	default:
13379 		return -EINVAL;
13380 	}
13381 }
13382 
13383 static const struct net_device_ops i40e_netdev_ops = {
13384 	.ndo_open		= i40e_open,
13385 	.ndo_stop		= i40e_close,
13386 	.ndo_start_xmit		= i40e_lan_xmit_frame,
13387 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
13388 	.ndo_set_rx_mode	= i40e_set_rx_mode,
13389 	.ndo_validate_addr	= eth_validate_addr,
13390 	.ndo_set_mac_address	= i40e_set_mac,
13391 	.ndo_change_mtu		= i40e_change_mtu,
13392 	.ndo_eth_ioctl		= i40e_ioctl,
13393 	.ndo_tx_timeout		= i40e_tx_timeout,
13394 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
13395 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
13396 #ifdef CONFIG_NET_POLL_CONTROLLER
13397 	.ndo_poll_controller	= i40e_netpoll,
13398 #endif
13399 	.ndo_setup_tc		= __i40e_setup_tc,
13400 	.ndo_select_queue	= i40e_lan_select_queue,
13401 	.ndo_set_features	= i40e_set_features,
13402 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
13403 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
13404 	.ndo_get_vf_stats	= i40e_get_vf_stats,
13405 	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
13406 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
13407 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
13408 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
13409 	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
13410 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
13411 	.ndo_fdb_add		= i40e_ndo_fdb_add,
13412 	.ndo_features_check	= i40e_features_check,
13413 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
13414 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
13415 	.ndo_bpf		= i40e_xdp,
13416 	.ndo_xdp_xmit		= i40e_xdp_xmit,
13417 	.ndo_xsk_wakeup	        = i40e_xsk_wakeup,
13418 	.ndo_dfwd_add_station	= i40e_fwd_add,
13419 	.ndo_dfwd_del_station	= i40e_fwd_del,
13420 };
13421 
13422 /**
13423  * i40e_config_netdev - Setup the netdev flags
13424  * @vsi: the VSI being configured
13425  *
13426  * Returns 0 on success, negative value on failure
13427  **/
13428 static int i40e_config_netdev(struct i40e_vsi *vsi)
13429 {
13430 	struct i40e_pf *pf = vsi->back;
13431 	struct i40e_hw *hw = &pf->hw;
13432 	struct i40e_netdev_priv *np;
13433 	struct net_device *netdev;
13434 	u8 broadcast[ETH_ALEN];
13435 	u8 mac_addr[ETH_ALEN];
13436 	int etherdev_size;
13437 	netdev_features_t hw_enc_features;
13438 	netdev_features_t hw_features;
13439 
13440 	etherdev_size = sizeof(struct i40e_netdev_priv);
13441 	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13442 	if (!netdev)
13443 		return -ENOMEM;
13444 
13445 	vsi->netdev = netdev;
13446 	np = netdev_priv(netdev);
13447 	np->vsi = vsi;
13448 
13449 	hw_enc_features = NETIF_F_SG			|
13450 			  NETIF_F_HW_CSUM		|
13451 			  NETIF_F_HIGHDMA		|
13452 			  NETIF_F_SOFT_FEATURES		|
13453 			  NETIF_F_TSO			|
13454 			  NETIF_F_TSO_ECN		|
13455 			  NETIF_F_TSO6			|
13456 			  NETIF_F_GSO_GRE		|
13457 			  NETIF_F_GSO_GRE_CSUM		|
13458 			  NETIF_F_GSO_PARTIAL		|
13459 			  NETIF_F_GSO_IPXIP4		|
13460 			  NETIF_F_GSO_IPXIP6		|
13461 			  NETIF_F_GSO_UDP_TUNNEL	|
13462 			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
13463 			  NETIF_F_GSO_UDP_L4		|
13464 			  NETIF_F_SCTP_CRC		|
13465 			  NETIF_F_RXHASH		|
13466 			  NETIF_F_RXCSUM		|
13467 			  0;
13468 
13469 	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13470 		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13471 
13472 	netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13473 
13474 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13475 
13476 	netdev->hw_enc_features |= hw_enc_features;
13477 
13478 	/* record features VLANs can make use of */
13479 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13480 
13481 #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE |		\
13482 				   NETIF_F_GSO_GRE_CSUM |	\
13483 				   NETIF_F_GSO_IPXIP4 |		\
13484 				   NETIF_F_GSO_IPXIP6 |		\
13485 				   NETIF_F_GSO_UDP_TUNNEL |	\
13486 				   NETIF_F_GSO_UDP_TUNNEL_CSUM)
13487 
13488 	netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
13489 	netdev->features |= NETIF_F_GSO_PARTIAL |
13490 			    I40E_GSO_PARTIAL_FEATURES;
13491 
13492 	netdev->mpls_features |= NETIF_F_SG;
13493 	netdev->mpls_features |= NETIF_F_HW_CSUM;
13494 	netdev->mpls_features |= NETIF_F_TSO;
13495 	netdev->mpls_features |= NETIF_F_TSO6;
13496 	netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
13497 
13498 	/* enable macvlan offloads */
13499 	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13500 
13501 	hw_features = hw_enc_features		|
13502 		      NETIF_F_HW_VLAN_CTAG_TX	|
13503 		      NETIF_F_HW_VLAN_CTAG_RX;
13504 
13505 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13506 		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13507 
13508 	netdev->hw_features |= hw_features;
13509 
13510 	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13511 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13512 
13513 	netdev->features &= ~NETIF_F_HW_TC;
13514 
13515 	if (vsi->type == I40E_VSI_MAIN) {
13516 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13517 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
13518 		/* The following steps are necessary for two reasons. First,
13519 		 * some older NVM configurations load a default MAC-VLAN
13520 		 * filter that will accept any tagged packet, and we want to
13521 		 * replace this with a normal filter. Additionally, it is
13522 		 * possible our MAC address was provided by the platform using
13523 		 * Open Firmware or similar.
13524 		 *
13525 		 * Thus, we need to remove the default filter and install one
13526 		 * specific to the MAC address.
13527 		 */
13528 		i40e_rm_default_mac_filter(vsi, mac_addr);
13529 		spin_lock_bh(&vsi->mac_filter_hash_lock);
13530 		i40e_add_mac_filter(vsi, mac_addr);
13531 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
13532 	} else {
13533 		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13534 		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13535 		 * the end, which is 4 bytes long, so force truncation of the
13536 		 * original name by IFNAMSIZ - 4
13537 		 */
13538 		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13539 			 IFNAMSIZ - 4,
13540 			 pf->vsi[pf->lan_vsi]->netdev->name);
13541 		eth_random_addr(mac_addr);
13542 
13543 		spin_lock_bh(&vsi->mac_filter_hash_lock);
13544 		i40e_add_mac_filter(vsi, mac_addr);
13545 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
13546 	}
13547 
13548 	/* Add the broadcast filter so that we initially will receive
13549 	 * broadcast packets. Note that when a new VLAN is first added the
13550 	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13551 	 * specific filters as part of transitioning into "vlan" operation.
13552 	 * When more VLANs are added, the driver will copy each existing MAC
13553 	 * filter and add it for the new VLAN.
13554 	 *
13555 	 * Broadcast filters are handled specially by
	 * i40e_sync_filters_subtask, as the driver must set the broadcast
13557 	 * promiscuous bit instead of adding this directly as a MAC/VLAN
13558 	 * filter. The subtask will update the correct broadcast promiscuous
13559 	 * bits as VLANs become active or inactive.
13560 	 */
13561 	eth_broadcast_addr(broadcast);
13562 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13563 	i40e_add_mac_filter(vsi, broadcast);
13564 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13565 
13566 	eth_hw_addr_set(netdev, mac_addr);
13567 	ether_addr_copy(netdev->perm_addr, mac_addr);
13568 
13569 	/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13570 	netdev->neigh_priv_len = sizeof(u32) * 4;
13571 
13572 	netdev->priv_flags |= IFF_UNICAST_FLT;
13573 	netdev->priv_flags |= IFF_SUPP_NOFCS;
13574 	/* Setup netdev TC information */
13575 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13576 
13577 	netdev->netdev_ops = &i40e_netdev_ops;
13578 	netdev->watchdog_timeo = 5 * HZ;
13579 	i40e_set_ethtool_ops(netdev);
13580 
13581 	/* MTU range: 68 - 9706 */
13582 	netdev->min_mtu = ETH_MIN_MTU;
13583 	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13584 
13585 	return 0;
13586 }
13587 
13588 /**
13589  * i40e_vsi_delete - Delete a VSI from the switch
13590  * @vsi: the VSI being removed
13591  *
13592  * Returns 0 on success, negative value on failure
13593  **/
13594 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13595 {
	/* removing the default VSI is not allowed */
13597 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13598 		return;
13599 
13600 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13601 }
13602 
13603 /**
13604  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13605  * @vsi: the VSI being queried
13606  *
 * Returns 1 if the HW bridge mode is VEB, 0 in case of VEPA mode and
 * -ENOENT if no VEB is associated with the bridge
13608  **/
13609 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13610 {
13611 	struct i40e_veb *veb;
13612 	struct i40e_pf *pf = vsi->back;
13613 
13614 	/* Uplink is not a bridge so default to VEB */
13615 	if (vsi->veb_idx >= I40E_MAX_VEB)
13616 		return 1;
13617 
13618 	veb = pf->veb[vsi->veb_idx];
13619 	if (!veb) {
13620 		dev_info(&pf->pdev->dev,
13621 			 "There is no veb associated with the bridge\n");
13622 		return -ENOENT;
13623 	}
13624 
	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
13635 }
13636 
13637 /**
13638  * i40e_add_vsi - Add a VSI to the switch
13639  * @vsi: the VSI being configured
13640  *
13641  * This initializes a VSI context depending on the VSI type to be added and
13642  * passes it down to the add_vsi aq command.
13643  **/
13644 static int i40e_add_vsi(struct i40e_vsi *vsi)
13645 {
13646 	int ret = -ENODEV;
13647 	struct i40e_pf *pf = vsi->back;
13648 	struct i40e_hw *hw = &pf->hw;
13649 	struct i40e_vsi_context ctxt;
13650 	struct i40e_mac_filter *f;
13651 	struct hlist_node *h;
13652 	int bkt;
13653 
13654 	u8 enabled_tc = 0x1; /* TC0 enabled */
13655 	int f_count = 0;
13656 
13657 	memset(&ctxt, 0, sizeof(ctxt));
13658 	switch (vsi->type) {
13659 	case I40E_VSI_MAIN:
		/* The PF's main VSI is already set up as part of the
13661 		 * device initialization, so we'll not bother with
13662 		 * the add_vsi call, but we will retrieve the current
13663 		 * VSI context.
13664 		 */
13665 		ctxt.seid = pf->main_vsi_seid;
13666 		ctxt.pf_num = pf->hw.pf_id;
13667 		ctxt.vf_num = 0;
13668 		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13669 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13670 		if (ret) {
13671 			dev_info(&pf->pdev->dev,
13672 				 "couldn't get PF vsi config, err %s aq_err %s\n",
13673 				 i40e_stat_str(&pf->hw, ret),
13674 				 i40e_aq_str(&pf->hw,
13675 					     pf->hw.aq.asq_last_status));
13676 			return -ENOENT;
13677 		}
13678 		vsi->info = ctxt.info;
13679 		vsi->info.valid_sections = 0;
13680 
13681 		vsi->seid = ctxt.seid;
13682 		vsi->id = ctxt.vsi_number;
13683 
13684 		enabled_tc = i40e_pf_get_tc_map(pf);
13685 
13686 		/* Source pruning is enabled by default, so the flag is
13687 		 * negative logic - if it's set, we need to fiddle with
13688 		 * the VSI to disable source pruning.
13689 		 */
13690 		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13691 			memset(&ctxt, 0, sizeof(ctxt));
13692 			ctxt.seid = pf->main_vsi_seid;
13693 			ctxt.pf_num = pf->hw.pf_id;
13694 			ctxt.vf_num = 0;
13695 			ctxt.info.valid_sections |=
13696 				     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13697 			ctxt.info.switch_id =
13698 				   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13699 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13700 			if (ret) {
13701 				dev_info(&pf->pdev->dev,
13702 					 "update vsi failed, err %s aq_err %s\n",
13703 					 i40e_stat_str(&pf->hw, ret),
13704 					 i40e_aq_str(&pf->hw,
13705 						     pf->hw.aq.asq_last_status));
13706 				ret = -ENOENT;
13707 				goto err;
13708 			}
13709 		}
13710 
13711 		/* MFP mode setup queue map and update VSI */
13712 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13713 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13714 			memset(&ctxt, 0, sizeof(ctxt));
13715 			ctxt.seid = pf->main_vsi_seid;
13716 			ctxt.pf_num = pf->hw.pf_id;
13717 			ctxt.vf_num = 0;
13718 			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13719 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13720 			if (ret) {
13721 				dev_info(&pf->pdev->dev,
13722 					 "update vsi failed, err %s aq_err %s\n",
13723 					 i40e_stat_str(&pf->hw, ret),
13724 					 i40e_aq_str(&pf->hw,
13725 						    pf->hw.aq.asq_last_status));
13726 				ret = -ENOENT;
13727 				goto err;
13728 			}
13729 			/* update the local VSI info queue map */
13730 			i40e_vsi_update_queue_map(vsi, &ctxt);
13731 			vsi->info.valid_sections = 0;
13732 		} else {
			/* Default/Main VSI is only enabled for TC0;
13734 			 * reconfigure it to enable all TCs that are
13735 			 * available on the port in SFP mode.
13736 			 * For MFP case the iSCSI PF would use this
13737 			 * flow to enable LAN+iSCSI TC.
13738 			 */
13739 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
13740 			if (ret) {
				/* A single-TC configuration is not fatal;
				 * log a message and continue
13743 				 */
13744 				dev_info(&pf->pdev->dev,
13745 					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13746 					 enabled_tc,
13747 					 i40e_stat_str(&pf->hw, ret),
13748 					 i40e_aq_str(&pf->hw,
13749 						    pf->hw.aq.asq_last_status));
13750 			}
13751 		}
13752 		break;
13753 
13754 	case I40E_VSI_FDIR:
13755 		ctxt.pf_num = hw->pf_id;
13756 		ctxt.vf_num = 0;
13757 		ctxt.uplink_seid = vsi->uplink_seid;
13758 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13759 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13760 		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13761 		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
13762 			ctxt.info.valid_sections |=
13763 			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13764 			ctxt.info.switch_id =
13765 			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13766 		}
13767 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13768 		break;
13769 
13770 	case I40E_VSI_VMDQ2:
13771 		ctxt.pf_num = hw->pf_id;
13772 		ctxt.vf_num = 0;
13773 		ctxt.uplink_seid = vsi->uplink_seid;
13774 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13775 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13776 
13777 		/* This VSI is connected to VEB so the switch_id
13778 		 * should be set to zero by default.
13779 		 */
13780 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13781 			ctxt.info.valid_sections |=
13782 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13783 			ctxt.info.switch_id =
13784 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13785 		}
13786 
13787 		/* Setup the VSI tx/rx queue map for TC0 only for now */
13788 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13789 		break;
13790 
13791 	case I40E_VSI_SRIOV:
13792 		ctxt.pf_num = hw->pf_id;
13793 		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13794 		ctxt.uplink_seid = vsi->uplink_seid;
13795 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13796 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13797 
13798 		/* This VSI is connected to VEB so the switch_id
13799 		 * should be set to zero by default.
13800 		 */
13801 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13802 			ctxt.info.valid_sections |=
13803 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13804 			ctxt.info.switch_id =
13805 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13806 		}
13807 
13808 		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13809 			ctxt.info.valid_sections |=
13810 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13811 			ctxt.info.queueing_opt_flags |=
13812 				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13813 				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13814 		}
13815 
13816 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13817 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
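		/* With spoof checking requested for this VF, have the HW
		 * drop frames whose source MAC/VLAN doesn't match one of
		 * the VF's own filters.
		 */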
13818 		if (pf->vf[vsi->vf_id].spoofchk) {
13819 			ctxt.info.valid_sections |=
13820 				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13821 			ctxt.info.sec_flags |=
13822 				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13823 				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13824 		}
13825 		/* Setup the VSI tx/rx queue map for TC0 only for now */
13826 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13827 		break;
13828 
13829 	case I40E_VSI_IWARP:
13830 		/* send down message to iWARP */
13831 		break;
13832 
13833 	default:
13834 		return -ENODEV;
13835 	}
13836 
13837 	if (vsi->type != I40E_VSI_MAIN) {
13838 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13839 		if (ret) {
13840 			dev_info(&vsi->back->pdev->dev,
13841 				 "add vsi failed, err %s aq_err %s\n",
13842 				 i40e_stat_str(&pf->hw, ret),
13843 				 i40e_aq_str(&pf->hw,
13844 					     pf->hw.aq.asq_last_status));
13845 			ret = -ENOENT;
13846 			goto err;
13847 		}
13848 		vsi->info = ctxt.info;
13849 		vsi->info.valid_sections = 0;
13850 		vsi->seid = ctxt.seid;
13851 		vsi->id = ctxt.vsi_number;
13852 	}
13853 
13854 	vsi->active_filters = 0;
13855 	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13856 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13857 	/* If macvlan filters already exist, force them to get loaded */
13858 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13859 		f->state = I40E_FILTER_NEW;
13860 		f_count++;
13861 	}
13862 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13863 
13864 	if (f_count) {
13865 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13866 		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13867 	}
13868 
13869 	/* Update VSI BW information */
13870 	ret = i40e_vsi_get_bw_info(vsi);
13871 	if (ret) {
13872 		dev_info(&pf->pdev->dev,
13873 			 "couldn't get vsi bw info, err %s aq_err %s\n",
13874 			 i40e_stat_str(&pf->hw, ret),
13875 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13876 		/* VSI is already added so not tearing that up */
13877 		ret = 0;
13878 	}
13879 
13880 err:
13881 	return ret;
13882 }
13883 
13884 /**
13885  * i40e_vsi_release - Delete a VSI and free its resources
13886  * @vsi: the VSI being removed
13887  *
13888  * Returns 0 on success or < 0 on error
13889  **/
13890 int i40e_vsi_release(struct i40e_vsi *vsi)
13891 {
13892 	struct i40e_mac_filter *f;
13893 	struct hlist_node *h;
13894 	struct i40e_veb *veb = NULL;
13895 	struct i40e_pf *pf;
13896 	u16 uplink_seid;
13897 	int i, n, bkt;
13898 
13899 	pf = vsi->back;
13900 
13901 	/* release of a VEB-owner or last VSI is not allowed */
13902 	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13903 		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13904 			 vsi->seid, vsi->uplink_seid);
13905 		return -ENODEV;
13906 	}
13907 	if (vsi == pf->vsi[pf->lan_vsi] &&
13908 	    !test_bit(__I40E_DOWN, pf->state)) {
13909 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13910 		return -ENODEV;
13911 	}
13912 	set_bit(__I40E_VSI_RELEASING, vsi->state);
13913 	uplink_seid = vsi->uplink_seid;
13914 	if (vsi->type != I40E_VSI_SRIOV) {
13915 		if (vsi->netdev_registered) {
13916 			vsi->netdev_registered = false;
13917 			if (vsi->netdev) {
13918 				/* results in a call to i40e_close() */
13919 				unregister_netdev(vsi->netdev);
13920 			}
13921 		} else {
13922 			i40e_vsi_close(vsi);
13923 		}
13924 		i40e_vsi_disable_irq(vsi);
13925 	}
13926 
13927 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13928 
13929 	/* clear the sync flag on all filters */
13930 	if (vsi->netdev) {
13931 		__dev_uc_unsync(vsi->netdev, NULL);
13932 		__dev_mc_unsync(vsi->netdev, NULL);
13933 	}
13934 
13935 	/* make sure any remaining filters are marked for deletion */
13936 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13937 		__i40e_del_filter(vsi, f);
13938 
13939 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13940 
13941 	i40e_sync_vsi_filters(vsi);
13942 
13943 	i40e_vsi_delete(vsi);
13944 	i40e_vsi_free_q_vectors(vsi);
13945 	if (vsi->netdev) {
13946 		free_netdev(vsi->netdev);
13947 		vsi->netdev = NULL;
13948 	}
13949 	i40e_vsi_clear_rings(vsi);
13950 	i40e_vsi_clear(vsi);
13951 
13952 	/* If this was the last thing on the VEB, except for the
13953 	 * controlling VSI, remove the VEB, which puts the controlling
13954 	 * VSI onto the next level down in the switch.
13955 	 *
13956 	 * Well, okay, there's one more exception here: don't remove
13957 	 * the orphan VEBs yet.  We'll wait for an explicit remove request
13958 	 * from up the network stack.
13959 	 */
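	/* count the VSIs and VEBs still attached to the same uplink, and
	 * find the VEB this VSI used to hang off of
	 */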
13960 	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13961 		if (pf->vsi[i] &&
13962 		    pf->vsi[i]->uplink_seid == uplink_seid &&
13963 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13964 			n++;      /* count the VSIs */
13965 		}
13966 	}
13967 	for (i = 0; i < I40E_MAX_VEB; i++) {
13968 		if (!pf->veb[i])
13969 			continue;
13970 		if (pf->veb[i]->uplink_seid == uplink_seid)
13971 			n++;     /* count the VEBs */
13972 		if (pf->veb[i]->seid == uplink_seid)
13973 			veb = pf->veb[i];
13974 	}
13975 	if (n == 0 && veb && veb->uplink_seid != 0)
13976 		i40e_veb_release(veb);
13977 
13978 	return 0;
13979 }
13980 
13981 /**
13982  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13983  * @vsi: ptr to the VSI
13984  *
13985  * This should only be called after i40e_vsi_mem_alloc() which allocates the
13986  * corresponding SW VSI structure and initializes num_queue_pairs for the
13987  * newly allocated VSI.
13988  *
13989  * Returns 0 on success or negative on failure
13990  **/
13991 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13992 {
13993 	int ret = -ENOENT;
13994 	struct i40e_pf *pf = vsi->back;
13995 
13996 	if (vsi->q_vectors[0]) {
13997 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13998 			 vsi->seid);
13999 		return -EEXIST;
14000 	}
14001 
14002 	if (vsi->base_vector) {
14003 		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
14004 			 vsi->seid, vsi->base_vector);
14005 		return -EEXIST;
14006 	}
14007 
14008 	ret = i40e_vsi_alloc_q_vectors(vsi);
14009 	if (ret) {
14010 		dev_info(&pf->pdev->dev,
14011 			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
14012 			 vsi->num_q_vectors, vsi->seid, ret);
14013 		vsi->num_q_vectors = 0;
14014 		goto vector_setup_out;
14015 	}
14016 
14017 	/* In Legacy mode, we do not have to get any other vector since we
14018 	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
14020 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
14021 		return ret;
14022 	if (vsi->num_q_vectors)
14023 		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
14024 						 vsi->num_q_vectors, vsi->idx);
14025 	if (vsi->base_vector < 0) {
14026 		dev_info(&pf->pdev->dev,
14027 			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14028 			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14029 		i40e_vsi_free_q_vectors(vsi);
14030 		ret = -ENOENT;
14031 		goto vector_setup_out;
14032 	}
14033 
14034 vector_setup_out:
14035 	return ret;
14036 }
14037 
14038 /**
 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
14040  * @vsi: pointer to the vsi.
14041  *
14042  * This re-allocates a vsi's queue resources.
14043  *
14044  * Returns pointer to the successfully allocated and configured VSI sw struct
14045  * on success, otherwise returns NULL on failure.
14046  **/
14047 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14048 {
14049 	u16 alloc_queue_pairs;
14050 	struct i40e_pf *pf;
14051 	u8 enabled_tc;
14052 	int ret;
14053 
14054 	if (!vsi)
14055 		return NULL;
14056 
14057 	pf = vsi->back;
14058 
14059 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14060 	i40e_vsi_clear_rings(vsi);
14061 
14062 	i40e_vsi_free_arrays(vsi, false);
14063 	i40e_set_num_rings_in_vsi(vsi);
14064 	ret = i40e_vsi_alloc_arrays(vsi, false);
14065 	if (ret)
14066 		goto err_vsi;
14067 
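	/* an XDP program needs its own XDP Tx ring per queue pair, so
	 * reserve twice the queue pairs when XDP is enabled
	 */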
14068 	alloc_queue_pairs = vsi->alloc_queue_pairs *
14069 			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14070 
14071 	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14072 	if (ret < 0) {
14073 		dev_info(&pf->pdev->dev,
14074 			 "failed to get tracking for %d queues for VSI %d err %d\n",
14075 			 alloc_queue_pairs, vsi->seid, ret);
14076 		goto err_vsi;
14077 	}
14078 	vsi->base_queue = ret;
14079 
14080 	/* Update the FW view of the VSI. Force a reset of TC and queue
14081 	 * layout configurations.
14082 	 */
14083 	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14084 	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14085 	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14086 	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14087 	if (vsi->type == I40E_VSI_MAIN)
14088 		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14089 
14090 	/* assign it some queues */
14091 	ret = i40e_alloc_rings(vsi);
14092 	if (ret)
14093 		goto err_rings;
14094 
14095 	/* map all of the rings to the q_vectors */
14096 	i40e_vsi_map_rings_to_vectors(vsi);
14097 	return vsi;
14098 
14099 err_rings:
14100 	i40e_vsi_free_q_vectors(vsi);
14101 	if (vsi->netdev_registered) {
14102 		vsi->netdev_registered = false;
14103 		unregister_netdev(vsi->netdev);
14104 		free_netdev(vsi->netdev);
14105 		vsi->netdev = NULL;
14106 	}
14107 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14108 err_vsi:
14109 	i40e_vsi_clear(vsi);
14110 	return NULL;
14111 }
14112 
14113 /**
14114  * i40e_vsi_setup - Set up a VSI by a given type
14115  * @pf: board private structure
14116  * @type: VSI type
14117  * @uplink_seid: the switch element to link to
14118  * @param1: usage depends upon VSI type. For VF types, indicates VF id
14119  *
 * This allocates the sw VSI structure and its queue resources, then adds a VSI
14121  * to the identified VEB.
14122  *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
14124  * success, otherwise returns NULL on failure.
14125  **/
14126 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14127 				u16 uplink_seid, u32 param1)
14128 {
14129 	struct i40e_vsi *vsi = NULL;
14130 	struct i40e_veb *veb = NULL;
14131 	u16 alloc_queue_pairs;
14132 	int ret, i;
14133 	int v_idx;
14134 
14135 	/* The requested uplink_seid must be either
14136 	 *     - the PF's port seid
14137 	 *              no VEB is needed because this is the PF
14138 	 *              or this is a Flow Director special case VSI
14139 	 *     - seid of an existing VEB
14140 	 *     - seid of a VSI that owns an existing VEB
14141 	 *     - seid of a VSI that doesn't own a VEB
14142 	 *              a new VEB is created and the VSI becomes the owner
14143 	 *     - seid of the PF VSI, which is what creates the first VEB
14144 	 *              this is a special case of the previous
14145 	 *
14146 	 * Find which uplink_seid we were given and create a new VEB if needed
14147 	 */
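	/* Purely illustrative (not a call made from here): a VMDq VSI below
	 * the main LAN VSI would typically be requested with something like
	 *	i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
	 *		       pf->vsi[pf->lan_vsi]->seid, 0);
	 * which lets this function create the intermediate VEB on demand.
	 */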
14148 	for (i = 0; i < I40E_MAX_VEB; i++) {
14149 		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14150 			veb = pf->veb[i];
14151 			break;
14152 		}
14153 	}
14154 
14155 	if (!veb && uplink_seid != pf->mac_seid) {
14156 
14157 		for (i = 0; i < pf->num_alloc_vsi; i++) {
14158 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14159 				vsi = pf->vsi[i];
14160 				break;
14161 			}
14162 		}
14163 		if (!vsi) {
14164 			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14165 				 uplink_seid);
14166 			return NULL;
14167 		}
14168 
14169 		if (vsi->uplink_seid == pf->mac_seid)
14170 			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14171 					     vsi->tc_config.enabled_tc);
14172 		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14173 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14174 					     vsi->tc_config.enabled_tc);
14175 		if (veb) {
14176 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14177 				dev_info(&vsi->back->pdev->dev,
14178 					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14179 				return NULL;
14180 			}
14181 			/* We come up by default in VEPA mode if SRIOV is not
14182 			 * already enabled, in which case we can't force VEPA
14183 			 * mode.
14184 			 */
14185 			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14186 				veb->bridge_mode = BRIDGE_MODE_VEPA;
14187 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14188 			}
14189 			i40e_config_bridge_mode(veb);
14190 		}
14191 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14192 			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14193 				veb = pf->veb[i];
14194 		}
14195 		if (!veb) {
14196 			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14197 			return NULL;
14198 		}
14199 
14200 		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14201 		uplink_seid = veb->seid;
14202 	}
14203 
14204 	/* get vsi sw struct */
14205 	v_idx = i40e_vsi_mem_alloc(pf, type);
14206 	if (v_idx < 0)
14207 		goto err_alloc;
14208 	vsi = pf->vsi[v_idx];
14209 	if (!vsi)
14210 		goto err_alloc;
14211 	vsi->type = type;
14212 	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14213 
14214 	if (type == I40E_VSI_MAIN)
14215 		pf->lan_vsi = v_idx;
14216 	else if (type == I40E_VSI_SRIOV)
14217 		vsi->vf_id = param1;
14218 	/* assign it some queues */
14219 	alloc_queue_pairs = vsi->alloc_queue_pairs *
14220 			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14221 
14222 	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14223 	if (ret < 0) {
14224 		dev_info(&pf->pdev->dev,
14225 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
14226 			 alloc_queue_pairs, vsi->seid, ret);
14227 		goto err_vsi;
14228 	}
14229 	vsi->base_queue = ret;
14230 
14231 	/* get a VSI from the hardware */
14232 	vsi->uplink_seid = uplink_seid;
14233 	ret = i40e_add_vsi(vsi);
14234 	if (ret)
14235 		goto err_vsi;
14236 
14237 	switch (vsi->type) {
14238 	/* setup the netdev if needed */
14239 	case I40E_VSI_MAIN:
14240 	case I40E_VSI_VMDQ2:
14241 		ret = i40e_config_netdev(vsi);
14242 		if (ret)
14243 			goto err_netdev;
14244 		ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14245 		if (ret)
14246 			goto err_netdev;
14247 		ret = register_netdev(vsi->netdev);
14248 		if (ret)
14249 			goto err_netdev;
14250 		vsi->netdev_registered = true;
14251 		netif_carrier_off(vsi->netdev);
14252 #ifdef CONFIG_I40E_DCB
14253 		/* Setup DCB netlink interface */
14254 		i40e_dcbnl_setup(vsi);
14255 #endif /* CONFIG_I40E_DCB */
14256 		fallthrough;
14257 	case I40E_VSI_FDIR:
14258 		/* set up vectors and rings if needed */
14259 		ret = i40e_vsi_setup_vectors(vsi);
14260 		if (ret)
14261 			goto err_msix;
14262 
14263 		ret = i40e_alloc_rings(vsi);
14264 		if (ret)
14265 			goto err_rings;
14266 
14267 		/* map all of the rings to the q_vectors */
14268 		i40e_vsi_map_rings_to_vectors(vsi);
14269 
14270 		i40e_vsi_reset_stats(vsi);
14271 		break;
14272 	default:
14273 		/* no netdev or rings for the other VSI types */
14274 		break;
14275 	}
14276 
14277 	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14278 	    (vsi->type == I40E_VSI_VMDQ2)) {
14279 		ret = i40e_vsi_config_rss(vsi);
14280 	}
14281 	return vsi;
14282 
14283 err_rings:
14284 	i40e_vsi_free_q_vectors(vsi);
14285 err_msix:
14286 	if (vsi->netdev_registered) {
14287 		vsi->netdev_registered = false;
14288 		unregister_netdev(vsi->netdev);
14289 		free_netdev(vsi->netdev);
14290 		vsi->netdev = NULL;
14291 	}
14292 err_netdev:
14293 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14294 err_vsi:
14295 	i40e_vsi_clear(vsi);
14296 err_alloc:
14297 	return NULL;
14298 }
14299 
14300 /**
14301  * i40e_veb_get_bw_info - Query VEB BW information
14302  * @veb: the veb to query
14303  *
14304  * Query the Tx scheduler BW configuration data for given VEB
14305  **/
14306 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14307 {
14308 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14309 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14310 	struct i40e_pf *pf = veb->pf;
14311 	struct i40e_hw *hw = &pf->hw;
14312 	u32 tc_bw_max;
14313 	int ret = 0;
14314 	int i;
14315 
14316 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14317 						  &bw_data, NULL);
14318 	if (ret) {
14319 		dev_info(&pf->pdev->dev,
14320 			 "query veb bw config failed, err %s aq_err %s\n",
14321 			 i40e_stat_str(&pf->hw, ret),
14322 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14323 		goto out;
14324 	}
14325 
14326 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14327 						   &ets_data, NULL);
14328 	if (ret) {
14329 		dev_info(&pf->pdev->dev,
14330 			 "query veb bw ets config failed, err %s aq_err %s\n",
14331 			 i40e_stat_str(&pf->hw, ret),
14332 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14333 		goto out;
14334 	}
14335 
14336 	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14337 	veb->bw_max_quanta = ets_data.tc_bw_max;
14338 	veb->is_abs_credits = bw_data.absolute_credits_enable;
14339 	veb->enabled_tc = ets_data.tc_valid_bits;
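	/* the per-TC max quanta are packed as 4-bit fields spread across
	 * two little-endian 16-bit words; merge them into one 32-bit value
	 * so each TC's field can be shifted out below
	 */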
14340 	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14341 		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14342 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14343 		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14344 		veb->bw_tc_limit_credits[i] =
14345 					le16_to_cpu(bw_data.tc_bw_limits[i]);
14346 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
14347 	}
14348 
14349 out:
14350 	return ret;
14351 }
14352 
14353 /**
14354  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14355  * @pf: board private structure
14356  *
14357  * On error: returns error code (negative)
 * On success: returns VEB index in PF (non-negative)
14359  **/
14360 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14361 {
14362 	int ret = -ENOENT;
14363 	struct i40e_veb *veb;
14364 	int i;
14365 
14366 	/* Need to protect the allocation of switch elements at the PF level */
14367 	mutex_lock(&pf->switch_mutex);
14368 
14369 	/* VEB list may be fragmented if VEB creation/destruction has
14370 	 * been happening.  We can afford to do a quick scan to look
14371 	 * for any free slots in the list.
14372 	 *
14373 	 * find next empty veb slot, looping back around if necessary
14374 	 */
14375 	i = 0;
14376 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14377 		i++;
14378 	if (i >= I40E_MAX_VEB) {
14379 		ret = -ENOMEM;
14380 		goto err_alloc_veb;  /* out of VEB slots! */
14381 	}
14382 
14383 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14384 	if (!veb) {
14385 		ret = -ENOMEM;
14386 		goto err_alloc_veb;
14387 	}
14388 	veb->pf = pf;
14389 	veb->idx = i;
14390 	veb->enabled_tc = 1;
14391 
14392 	pf->veb[i] = veb;
14393 	ret = i;
14394 err_alloc_veb:
14395 	mutex_unlock(&pf->switch_mutex);
14396 	return ret;
14397 }
14398 
14399 /**
14400  * i40e_switch_branch_release - Delete a branch of the switch tree
14401  * @branch: where to start deleting
14402  *
14403  * This uses recursion to find the tips of the branch to be
14404  * removed, deleting until we get back to and can delete this VEB.
14405  **/
14406 static void i40e_switch_branch_release(struct i40e_veb *branch)
14407 {
14408 	struct i40e_pf *pf = branch->pf;
14409 	u16 branch_seid = branch->seid;
14410 	u16 veb_idx = branch->idx;
14411 	int i;
14412 
14413 	/* release any VEBs on this VEB - RECURSION */
14414 	for (i = 0; i < I40E_MAX_VEB; i++) {
14415 		if (!pf->veb[i])
14416 			continue;
14417 		if (pf->veb[i]->uplink_seid == branch->seid)
14418 			i40e_switch_branch_release(pf->veb[i]);
14419 	}
14420 
14421 	/* Release the VSIs on this VEB, but not the owner VSI.
14422 	 *
14423 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14424 	 *       the VEB itself, so don't use (*branch) after this loop.
14425 	 */
14426 	for (i = 0; i < pf->num_alloc_vsi; i++) {
14427 		if (!pf->vsi[i])
14428 			continue;
14429 		if (pf->vsi[i]->uplink_seid == branch_seid &&
14430 		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14431 			i40e_vsi_release(pf->vsi[i]);
14432 		}
14433 	}
14434 
14435 	/* There's one corner case where the VEB might not have been
14436 	 * removed, so double check it here and remove it if needed.
14437 	 * This case happens if the veb was created from the debugfs
14438 	 * commands and no VSIs were added to it.
14439 	 */
14440 	if (pf->veb[veb_idx])
14441 		i40e_veb_release(pf->veb[veb_idx]);
14442 }
14443 
14444 /**
14445  * i40e_veb_clear - remove veb struct
14446  * @veb: the veb to remove
14447  **/
14448 static void i40e_veb_clear(struct i40e_veb *veb)
14449 {
14450 	if (!veb)
14451 		return;
14452 
14453 	if (veb->pf) {
14454 		struct i40e_pf *pf = veb->pf;
14455 
14456 		mutex_lock(&pf->switch_mutex);
14457 		if (pf->veb[veb->idx] == veb)
14458 			pf->veb[veb->idx] = NULL;
14459 		mutex_unlock(&pf->switch_mutex);
14460 	}
14461 
14462 	kfree(veb);
14463 }
14464 
14465 /**
14466  * i40e_veb_release - Delete a VEB and free its resources
14467  * @veb: the VEB being removed
14468  **/
14469 void i40e_veb_release(struct i40e_veb *veb)
14470 {
14471 	struct i40e_vsi *vsi = NULL;
14472 	struct i40e_pf *pf;
14473 	int i, n = 0;
14474 
14475 	pf = veb->pf;
14476 
14477 	/* find the remaining VSI and check for extras */
14478 	for (i = 0; i < pf->num_alloc_vsi; i++) {
14479 		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14480 			n++;
14481 			vsi = pf->vsi[i];
14482 		}
14483 	}
14484 	if (n != 1) {
14485 		dev_info(&pf->pdev->dev,
14486 			 "can't remove VEB %d with %d VSIs left\n",
14487 			 veb->seid, n);
14488 		return;
14489 	}
14490 
14491 	/* move the remaining VSI to uplink veb */
14492 	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14493 	if (veb->uplink_seid) {
14494 		vsi->uplink_seid = veb->uplink_seid;
14495 		if (veb->uplink_seid == pf->mac_seid)
14496 			vsi->veb_idx = I40E_NO_VEB;
14497 		else
14498 			vsi->veb_idx = veb->veb_idx;
14499 	} else {
14500 		/* floating VEB */
14501 		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14502 		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14503 	}
14504 
14505 	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14506 	i40e_veb_clear(veb);
14507 }
14508 
14509 /**
14510  * i40e_add_veb - create the VEB in the switch
14511  * @veb: the VEB to be instantiated
14512  * @vsi: the controlling VSI
14513  **/
14514 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14515 {
14516 	struct i40e_pf *pf = veb->pf;
14517 	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14518 	int ret;
14519 
	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);
	if (ret) {
14526 		dev_info(&pf->pdev->dev,
14527 			 "couldn't add VEB, err %s aq_err %s\n",
14528 			 i40e_stat_str(&pf->hw, ret),
14529 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14530 		return -EPERM;
14531 	}
14532 
14533 	/* get statistics counter */
14534 	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14535 					 &veb->stats_idx, NULL, NULL, NULL);
14536 	if (ret) {
14537 		dev_info(&pf->pdev->dev,
14538 			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14539 			 i40e_stat_str(&pf->hw, ret),
14540 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14541 		return -EPERM;
14542 	}
14543 	ret = i40e_veb_get_bw_info(veb);
14544 	if (ret) {
14545 		dev_info(&pf->pdev->dev,
14546 			 "couldn't get VEB bw info, err %s aq_err %s\n",
14547 			 i40e_stat_str(&pf->hw, ret),
14548 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14549 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14550 		return -ENOENT;
14551 	}
14552 
14553 	vsi->uplink_seid = veb->seid;
14554 	vsi->veb_idx = veb->idx;
14555 	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14556 
14557 	return 0;
14558 }
14559 
14560 /**
14561  * i40e_veb_setup - Set up a VEB
14562  * @pf: board private structure
14563  * @flags: VEB setup flags
14564  * @uplink_seid: the switch element to link to
14565  * @vsi_seid: the initial VSI seid
14566  * @enabled_tc: Enabled TC bit-map
14567  *
14568  * This allocates the sw VEB structure and links it into the switch
14569  * It is possible and legal for this to be a duplicate of an already
14570  * existing VEB.  It is also possible for both uplink and vsi seids
14571  * to be zero, in order to create a floating VEB.
14572  *
14573  * Returns pointer to the successfully allocated VEB sw struct on
14574  * success, otherwise returns NULL on failure.
14575  **/
14576 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14577 				u16 uplink_seid, u16 vsi_seid,
14578 				u8 enabled_tc)
14579 {
14580 	struct i40e_veb *veb, *uplink_veb = NULL;
14581 	int vsi_idx, veb_idx;
14582 	int ret;
14583 
14584 	/* if one seid is 0, the other must be 0 to create a floating relay */
14585 	if ((uplink_seid == 0 || vsi_seid == 0) &&
14586 	    (uplink_seid + vsi_seid != 0)) {
14587 		dev_info(&pf->pdev->dev,
			 "one, not both seids are 0: uplink=%d vsi=%d\n",
14589 			 uplink_seid, vsi_seid);
14590 		return NULL;
14591 	}
14592 
14593 	/* make sure there is such a vsi and uplink */
14594 	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14595 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14596 			break;
14597 	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14598 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14599 			 vsi_seid);
14600 		return NULL;
14601 	}
14602 
14603 	if (uplink_seid && uplink_seid != pf->mac_seid) {
14604 		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14605 			if (pf->veb[veb_idx] &&
14606 			    pf->veb[veb_idx]->seid == uplink_seid) {
14607 				uplink_veb = pf->veb[veb_idx];
14608 				break;
14609 			}
14610 		}
14611 		if (!uplink_veb) {
14612 			dev_info(&pf->pdev->dev,
14613 				 "uplink seid %d not found\n", uplink_seid);
14614 			return NULL;
14615 		}
14616 	}
14617 
14618 	/* get veb sw struct */
14619 	veb_idx = i40e_veb_mem_alloc(pf);
14620 	if (veb_idx < 0)
14621 		goto err_alloc;
14622 	veb = pf->veb[veb_idx];
14623 	veb->flags = flags;
14624 	veb->uplink_seid = uplink_seid;
14625 	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14626 	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14627 
14628 	/* create the VEB in the switch */
14629 	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14630 	if (ret)
14631 		goto err_veb;
14632 	if (vsi_idx == pf->lan_vsi)
14633 		pf->lan_veb = veb->idx;
14634 
14635 	return veb;
14636 
14637 err_veb:
14638 	i40e_veb_clear(veb);
14639 err_alloc:
14640 	return NULL;
14641 }
14642 
14643 /**
14644  * i40e_setup_pf_switch_element - set PF vars based on switch type
14645  * @pf: board private structure
14646  * @ele: element we are building info from
14647  * @num_reported: total number of elements
14648  * @printconfig: should we print the contents
14649  *
14650  * helper function to assist in extracting a few useful SEID values.
14651  **/
14652 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14653 				struct i40e_aqc_switch_config_element_resp *ele,
14654 				u16 num_reported, bool printconfig)
14655 {
14656 	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14657 	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14658 	u8 element_type = ele->element_type;
14659 	u16 seid = le16_to_cpu(ele->seid);
14660 
14661 	if (printconfig)
14662 		dev_info(&pf->pdev->dev,
14663 			 "type=%d seid=%d uplink=%d downlink=%d\n",
14664 			 element_type, seid, uplink_seid, downlink_seid);
14665 
14666 	switch (element_type) {
14667 	case I40E_SWITCH_ELEMENT_TYPE_MAC:
14668 		pf->mac_seid = seid;
14669 		break;
14670 	case I40E_SWITCH_ELEMENT_TYPE_VEB:
14671 		/* Main VEB? */
14672 		if (uplink_seid != pf->mac_seid)
14673 			break;
14674 		if (pf->lan_veb >= I40E_MAX_VEB) {
14675 			int v;
14676 
14677 			/* find existing or else empty VEB */
14678 			for (v = 0; v < I40E_MAX_VEB; v++) {
14679 				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14680 					pf->lan_veb = v;
14681 					break;
14682 				}
14683 			}
14684 			if (pf->lan_veb >= I40E_MAX_VEB) {
14685 				v = i40e_veb_mem_alloc(pf);
14686 				if (v < 0)
14687 					break;
14688 				pf->lan_veb = v;
14689 			}
14690 		}
14691 		if (pf->lan_veb >= I40E_MAX_VEB)
14692 			break;
14693 
14694 		pf->veb[pf->lan_veb]->seid = seid;
14695 		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14696 		pf->veb[pf->lan_veb]->pf = pf;
14697 		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14698 		break;
14699 	case I40E_SWITCH_ELEMENT_TYPE_VSI:
14700 		if (num_reported != 1)
14701 			break;
14702 		/* This is immediately after a reset so we can assume this is
14703 		 * the PF's VSI
14704 		 */
14705 		pf->mac_seid = uplink_seid;
14706 		pf->pf_seid = downlink_seid;
14707 		pf->main_vsi_seid = seid;
14708 		if (printconfig)
14709 			dev_info(&pf->pdev->dev,
14710 				 "pf_seid=%d main_vsi_seid=%d\n",
14711 				 pf->pf_seid, pf->main_vsi_seid);
14712 		break;
14713 	case I40E_SWITCH_ELEMENT_TYPE_PF:
14714 	case I40E_SWITCH_ELEMENT_TYPE_VF:
14715 	case I40E_SWITCH_ELEMENT_TYPE_EMP:
14716 	case I40E_SWITCH_ELEMENT_TYPE_BMC:
14717 	case I40E_SWITCH_ELEMENT_TYPE_PE:
14718 	case I40E_SWITCH_ELEMENT_TYPE_PA:
14719 		/* ignore these for now */
14720 		break;
14721 	default:
14722 		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14723 			 element_type, seid);
14724 		break;
14725 	}
14726 }
14727 
14728 /**
14729  * i40e_fetch_switch_configuration - Get switch config from firmware
14730  * @pf: board private structure
14731  * @printconfig: should we print the contents
14732  *
14733  * Get the current switch configuration from the device and
14734  * extract a few useful SEID values.
14735  **/
14736 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14737 {
14738 	struct i40e_aqc_get_switch_config_resp *sw_config;
14739 	u16 next_seid = 0;
14740 	int ret = 0;
14741 	u8 *aq_buf;
14742 	int i;
14743 
14744 	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14745 	if (!aq_buf)
14746 		return -ENOMEM;
14747 
14748 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
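	/* the switch configuration may not fit into a single response;
	 * next_seid is the continuation cookie, so keep querying until the
	 * FW reports nothing more is left (next_seid == 0)
	 */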
14749 	do {
14750 		u16 num_reported, num_total;
14751 
14752 		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14753 						I40E_AQ_LARGE_BUF,
14754 						&next_seid, NULL);
14755 		if (ret) {
14756 			dev_info(&pf->pdev->dev,
14757 				 "get switch config failed err %s aq_err %s\n",
14758 				 i40e_stat_str(&pf->hw, ret),
14759 				 i40e_aq_str(&pf->hw,
14760 					     pf->hw.aq.asq_last_status));
14761 			kfree(aq_buf);
14762 			return -ENOENT;
14763 		}
14764 
14765 		num_reported = le16_to_cpu(sw_config->header.num_reported);
14766 		num_total = le16_to_cpu(sw_config->header.num_total);
14767 
14768 		if (printconfig)
14769 			dev_info(&pf->pdev->dev,
14770 				 "header: %d reported %d total\n",
14771 				 num_reported, num_total);
14772 
14773 		for (i = 0; i < num_reported; i++) {
14774 			struct i40e_aqc_switch_config_element_resp *ele =
14775 				&sw_config->element[i];
14776 
14777 			i40e_setup_pf_switch_element(pf, ele, num_reported,
14778 						     printconfig);
14779 		}
14780 	} while (next_seid != 0);
14781 
14782 	kfree(aq_buf);
14783 	return ret;
14784 }
14785 
14786 /**
14787  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14788  * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
14790  * @lock_acquired: indicates whether or not the lock has been acquired
14791  *
14792  * Returns 0 on success, negative value on failure
14793  **/
14794 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
14795 {
14796 	u16 flags = 0;
14797 	int ret;
14798 
14799 	/* find out what's out there already */
14800 	ret = i40e_fetch_switch_configuration(pf, false);
14801 	if (ret) {
14802 		dev_info(&pf->pdev->dev,
14803 			 "couldn't fetch switch config, err %s aq_err %s\n",
14804 			 i40e_stat_str(&pf->hw, ret),
14805 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14806 		return ret;
14807 	}
14808 	i40e_pf_reset_stats(pf);
14809 
	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when the user requests promisc. The default is limited
	 * promisc.
	 */
14815 
14816 	if ((pf->hw.pf_id == 0) &&
14817 	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14818 		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14819 		pf->last_sw_conf_flags = flags;
14820 	}
14821 
14822 	if (pf->hw.pf_id == 0) {
14823 		u16 valid_flags;
14824 
14825 		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14826 		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14827 						NULL);
14828 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14829 			dev_info(&pf->pdev->dev,
14830 				 "couldn't set switch config bits, err %s aq_err %s\n",
14831 				 i40e_stat_str(&pf->hw, ret),
14832 				 i40e_aq_str(&pf->hw,
14833 					     pf->hw.aq.asq_last_status));
14834 			/* not a fatal problem, just keep going */
14835 		}
14836 		pf->last_sw_conf_valid_flags = valid_flags;
14837 	}
14838 
14839 	/* first time setup */
14840 	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14841 		struct i40e_vsi *vsi = NULL;
14842 		u16 uplink_seid;
14843 
14844 		/* Set up the PF VSI associated with the PF's main VSI
14845 		 * that is already in the HW switch
14846 		 */
14847 		if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14848 			uplink_seid = pf->veb[pf->lan_veb]->seid;
14849 		else
14850 			uplink_seid = pf->mac_seid;
14851 		if (pf->lan_vsi == I40E_NO_VSI)
14852 			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14853 		else if (reinit)
14854 			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14855 		if (!vsi) {
14856 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14857 			i40e_cloud_filter_exit(pf);
14858 			i40e_fdir_teardown(pf);
14859 			return -EAGAIN;
14860 		}
14861 	} else {
14862 		/* force a reset of TC and queue layout configurations */
14863 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14864 
14865 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14866 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14867 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14868 	}
14869 	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14870 
14871 	i40e_fdir_sb_setup(pf);
14872 
14873 	/* Setup static PF queue filter control settings */
14874 	ret = i40e_setup_pf_filter_control(pf);
14875 	if (ret) {
14876 		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14877 			 ret);
14878 		/* Failure here should not stop continuing other steps */
14879 	}
14880 
14881 	/* enable RSS in the HW, even for only one queue, as the stack can use
14882 	 * the hash
14883 	 */
14884 	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14885 		i40e_pf_config_rss(pf);
14886 
14887 	/* fill in link information and enable LSE reporting */
14888 	i40e_link_event(pf);
14889 
14890 	/* Initialize user-specific link properties */
14891 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14892 				  I40E_AQ_AN_COMPLETED) ? true : false);
14893 
14894 	i40e_ptp_init(pf);
14895 
14896 	if (!lock_acquired)
14897 		rtnl_lock();
14898 
14899 	/* repopulate tunnel port filters */
14900 	udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
14901 
14902 	if (!lock_acquired)
14903 		rtnl_unlock();
14904 
14905 	return ret;
14906 }
14907 
14908 /**
14909  * i40e_determine_queue_usage - Work out queue distribution
14910  * @pf: board private structure
14911  **/
14912 static void i40e_determine_queue_usage(struct i40e_pf *pf)
14913 {
14914 	int queues_left;
14915 	int q_max;
14916 
14917 	pf->num_lan_qps = 0;
14918 
14919 	/* Find the max queues to be put into basic use.  We'll always be
14920 	 * using TC0, whether or not DCB is running, and TC0 will get the
14921 	 * big RSS set.
14922 	 */
14923 	queues_left = pf->hw.func_caps.num_tx_qp;
14924 
14925 	if ((queues_left == 1) ||
14926 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14927 		/* one qp for PF, no queues for anything else */
14928 		queues_left = 0;
14929 		pf->alloc_rss_size = pf->num_lan_qps = 1;
14930 
14931 		/* make sure all the fancies are disabled */
14932 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
14933 			       I40E_FLAG_IWARP_ENABLED	|
14934 			       I40E_FLAG_FD_SB_ENABLED	|
14935 			       I40E_FLAG_FD_ATR_ENABLED	|
14936 			       I40E_FLAG_DCB_CAPABLE	|
14937 			       I40E_FLAG_DCB_ENABLED	|
14938 			       I40E_FLAG_SRIOV_ENABLED	|
14939 			       I40E_FLAG_VMDQ_ENABLED);
14940 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14941 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14942 				  I40E_FLAG_FD_SB_ENABLED |
14943 				  I40E_FLAG_FD_ATR_ENABLED |
14944 				  I40E_FLAG_DCB_CAPABLE))) {
14945 		/* one qp for PF */
14946 		pf->alloc_rss_size = pf->num_lan_qps = 1;
14947 		queues_left -= pf->num_lan_qps;
14948 
14949 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
14950 			       I40E_FLAG_IWARP_ENABLED	|
14951 			       I40E_FLAG_FD_SB_ENABLED	|
14952 			       I40E_FLAG_FD_ATR_ENABLED	|
14953 			       I40E_FLAG_DCB_ENABLED	|
14954 			       I40E_FLAG_VMDQ_ENABLED);
14955 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14956 	} else {
14957 		/* Not enough queues for all TCs */
14958 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14959 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14960 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14961 					I40E_FLAG_DCB_ENABLED);
14962 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14963 		}
14964 
		/* limit LAN qps: start from the larger of rss_size_max and
		 * the number of online CPUs, then cap it by the available
		 * queue pairs and MSI-X vectors
		 */
14966 		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14967 		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14968 		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14969 		pf->num_lan_qps = q_max;
14970 
14971 		queues_left -= pf->num_lan_qps;
14972 	}
14973 
14974 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14975 		if (queues_left > 1) {
14976 			queues_left -= 1; /* save 1 queue for FD */
14977 		} else {
14978 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14979 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14980 			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14981 		}
14982 	}
14983 
14984 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14985 	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14986 		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14987 					(queues_left / pf->num_vf_qps));
14988 		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14989 	}
14990 
14991 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14992 	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14993 		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14994 					  (queues_left / pf->num_vmdq_qps));
14995 		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14996 	}
14997 
14998 	pf->queues_left = queues_left;
14999 	dev_dbg(&pf->pdev->dev,
15000 		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
15001 		pf->hw.func_caps.num_tx_qp,
15002 		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
15003 		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
15004 		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
15005 		queues_left);
15006 }
15007 
15008 /**
15009  * i40e_setup_pf_filter_control - Setup PF static filter control
15010  * @pf: PF to be setup
15011  *
15012  * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per-PF
 * filter sizes required for them. It also enables the Flow Director,
 * ethertype and macvlan type filter settings for the PF.
15016  *
15017  * Returns 0 on success, negative on failure
15018  **/
15019 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
15020 {
15021 	struct i40e_filter_control_settings *settings = &pf->filter_settings;
15022 
15023 	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
15024 
15025 	/* Flow Director is enabled */
15026 	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
15027 		settings->enable_fdir = true;
15028 
15029 	/* Ethtype and MACVLAN filters enabled for PF */
15030 	settings->enable_ethtype = true;
15031 	settings->enable_macvlan = true;
15032 
15033 	if (i40e_set_filter_control(&pf->hw, settings))
15034 		return -ENOENT;
15035 
15036 	return 0;
15037 }
15038 
15039 #define INFO_STRING_LEN 255
15040 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
15041 static void i40e_print_features(struct i40e_pf *pf)
15042 {
15043 	struct i40e_hw *hw = &pf->hw;
15044 	char *buf;
15045 	int i;
15046 
15047 	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
15048 	if (!buf)
15049 		return;
15050 
15051 	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
15052 #ifdef CONFIG_PCI_IOV
15053 	i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
15054 #endif
15055 	i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
15056 		      pf->hw.func_caps.num_vsis,
15057 		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
15058 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
15059 		i += scnprintf(&buf[i], REMAIN(i), " RSS");
15060 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
15061 		i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
15062 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
15063 		i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
15064 		i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
15065 	}
15066 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
15067 		i += scnprintf(&buf[i], REMAIN(i), " DCB");
15068 	i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
15069 	i += scnprintf(&buf[i], REMAIN(i), " Geneve");
15070 	if (pf->flags & I40E_FLAG_PTP)
15071 		i += scnprintf(&buf[i], REMAIN(i), " PTP");
15072 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
15073 		i += scnprintf(&buf[i], REMAIN(i), " VEB");
15074 	else
15075 		i += scnprintf(&buf[i], REMAIN(i), " VEPA");
15076 
15077 	dev_info(&pf->pdev->dev, "%s\n", buf);
15078 	kfree(buf);
15079 	WARN_ON(i > INFO_STRING_LEN);
15080 }
15081 
15082 /**
15083  * i40e_get_platform_mac_addr - get platform-specific MAC address
15084  * @pdev: PCI device information struct
15085  * @pf: board private structure
15086  *
15087  * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware or an
 * arch-specific fallback. Otherwise, we'll default to the value stored
 * in firmware.
15091  **/
15092 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
15093 {
15094 	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
15095 		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
15096 }
15097 
15098 /**
15099  * i40e_set_fec_in_flags - helper function for setting FEC options in flags
15100  * @fec_cfg: FEC option to set in flags
15101  * @flags: ptr to flags in which we set FEC option
15102  **/
15103 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
15104 {
15105 	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
15106 		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
15107 	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
15108 	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
15109 		*flags |= I40E_FLAG_RS_FEC;
15110 		*flags &= ~I40E_FLAG_BASE_R_FEC;
15111 	}
15112 	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
15113 	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
15114 		*flags |= I40E_FLAG_BASE_R_FEC;
15115 		*flags &= ~I40E_FLAG_RS_FEC;
15116 	}
15117 	if (fec_cfg == 0)
15118 		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
15119 }
15120 
15121 /**
15122  * i40e_check_recovery_mode - check if we are running transition firmware
15123  * @pf: board private structure
15124  *
15125  * Check registers indicating the firmware runs in recovery mode. Sets the
15126  * appropriate driver state.
15127  *
15128  * Returns true if the recovery mode was detected, false otherwise
15129  **/
15130 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
15131 {
15132 	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
15133 
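	/* a non-zero FWS1B state indicates the firmware is running in
	 * recovery/transition mode rather than the normal image
	 */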
15134 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
15135 		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
15136 		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
15137 		set_bit(__I40E_RECOVERY_MODE, pf->state);
15138 
15139 		return true;
15140 	}
15141 	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15142 		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
15143 
15144 	return false;
15145 }
15146 
15147 /**
15148  * i40e_pf_loop_reset - perform reset in a loop.
15149  * @pf: board private structure
15150  *
15151  * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted, the NIC's
 * firmware is going to enter recovery mode.
 * Right after a POR it takes about 7 minutes for the firmware to enter
 * recovery mode. Until then the NIC is in some kind of intermediate
 * state. After that time period the NIC almost surely enters
 * recovery mode. The only way for the driver to detect the intermediate
 * state is to issue a series of PF resets and check the return value.
 * If a PF reset returns success then the firmware could be in recovery
 * mode, so the caller of this code needs to check for recovery mode
 * if this function returns success. There is a small chance that the
 * firmware will hang in the intermediate state forever.
 * Since waiting 7 minutes is quite a lot of time, this function waits
 * 10 seconds and then gives up by returning an error.
15165  *
15166  * Return 0 on success, negative on failure.
15167  **/
15168 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
15169 {
15170 	/* wait max 10 seconds for PF reset to succeed */
15171 	const unsigned long time_end = jiffies + 10 * HZ;
15172 
15173 	struct i40e_hw *hw = &pf->hw;
15174 	i40e_status ret;
15175 
15176 	ret = i40e_pf_reset(hw);
15177 	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
15178 		usleep_range(10000, 20000);
15179 		ret = i40e_pf_reset(hw);
15180 	}
15181 
15182 	if (ret == I40E_SUCCESS)
15183 		pf->pfr_count++;
15184 	else
15185 		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
15186 
15187 	return ret;
15188 }
15189 
15190 /**
15191  * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15192  * @pf: board private structure
15193  *
 * Check FW registers to determine if the FW issued an unexpected EMP Reset.
 * Every time an unexpected EMP Reset occurs, the FW increments
 * a counter of unexpected EMP Resets. When the counter reaches 10,
 * the FW should enter recovery mode.
15198  *
15199  * Returns true if FW issued unexpected EMP Reset
15200  **/
15201 static bool i40e_check_fw_empr(struct i40e_pf *pf)
15202 {
15203 	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
15204 			   I40E_GL_FWSTS_FWS1B_MASK;
15205 	return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
15206 	       (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
15207 }
15208 
15209 /**
15210  * i40e_handle_resets - handle EMP resets and PF resets
15211  * @pf: board private structure
15212  *
15213  * Handle both EMP resets and PF resets and conclude whether there are
15214  * any issues regarding these resets. If there are any issues then
 * generate a log entry.
15216  *
15217  * Return 0 if NIC is healthy or negative value when there are issues
15218  * with resets
15219  **/
15220 static i40e_status i40e_handle_resets(struct i40e_pf *pf)
15221 {
15222 	const i40e_status pfr = i40e_pf_loop_reset(pf);
15223 	const bool is_empr = i40e_check_fw_empr(pf);
15224 
15225 	if (is_empr || pfr != I40E_SUCCESS)
15226 		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
15227 
15228 	return is_empr ? I40E_ERR_RESET_FAILED : pfr;
15229 }
15230 
15231 /**
15232  * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15233  * @pf: board private structure
15234  * @hw: ptr to the hardware info
15235  *
15236  * This function does a minimal setup of all subsystems needed for running
15237  * recovery mode.
15238  *
15239  * Returns 0 on success, negative on failure
15240  **/
15241 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
15242 {
15243 	struct i40e_vsi *vsi;
15244 	int err;
15245 	int v_idx;
15246 
15247 	pci_save_state(pf->pdev);
15248 
15249 	/* set up periodic task facility */
15250 	timer_setup(&pf->service_timer, i40e_service_timer, 0);
15251 	pf->service_timer_period = HZ;
15252 
15253 	INIT_WORK(&pf->service_task, i40e_service_task);
15254 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
15255 
15256 	err = i40e_init_interrupt_scheme(pf);
15257 	if (err)
15258 		goto err_switch_setup;
15259 
15260 	/* The number of VSIs reported by the FW is the minimum guaranteed
15261 	 * to us; HW supports far more and we share the remaining pool with
15262 	 * the other PFs. We allocate space for more than the guarantee with
15263 	 * the understanding that we might not get them all later.
15264 	 */
15265 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15266 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15267 	else
15268 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15269 
15270 	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
15271 	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15272 			  GFP_KERNEL);
15273 	if (!pf->vsi) {
15274 		err = -ENOMEM;
15275 		goto err_switch_setup;
15276 	}
15277 
	/* We allocate one VSI, which is needed as the absolute minimum
	 * in order to register the netdev
15280 	 */
15281 	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
15282 	if (v_idx < 0) {
15283 		err = v_idx;
15284 		goto err_switch_setup;
15285 	}
15286 	pf->lan_vsi = v_idx;
15287 	vsi = pf->vsi[v_idx];
15288 	if (!vsi) {
15289 		err = -EFAULT;
15290 		goto err_switch_setup;
15291 	}
15292 	vsi->alloc_queue_pairs = 1;
15293 	err = i40e_config_netdev(vsi);
15294 	if (err)
15295 		goto err_switch_setup;
15296 	err = register_netdev(vsi->netdev);
15297 	if (err)
15298 		goto err_switch_setup;
15299 	vsi->netdev_registered = true;
15300 	i40e_dbg_pf_init(pf);
15301 
15302 	err = i40e_setup_misc_vector_for_recovery_mode(pf);
15303 	if (err)
15304 		goto err_switch_setup;
15305 
15306 	/* tell the firmware that we're starting */
15307 	i40e_send_version(pf);
15308 
15309 	/* since everything's happy, start the service_task timer */
15310 	mod_timer(&pf->service_timer,
15311 		  round_jiffies(jiffies + pf->service_timer_period));
15312 
15313 	return 0;
15314 
15315 err_switch_setup:
15316 	i40e_reset_interrupt_capability(pf);
15317 	del_timer_sync(&pf->service_timer);
15318 	i40e_shutdown_adminq(hw);
15319 	iounmap(hw->hw_addr);
15320 	pci_disable_pcie_error_reporting(pf->pdev);
15321 	pci_release_mem_regions(pf->pdev);
15322 	pci_disable_device(pf->pdev);
15323 	kfree(pf);
15324 
15325 	return err;
15326 }
15327 
15328 /**
15329  * i40e_set_subsystem_device_id - set subsystem device id
15330  * @hw: pointer to the hardware info
15331  *
15332  * Set PCI subsystem device id either from a pci_dev structure or
15333  * a specific FW register.
15334  **/
15335 static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
15336 {
15337 	struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
15338 
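	/* Use the PCI config value when it is non-zero; otherwise fall back
	 * to the subsystem ID the FW exposes via I40E_PFPCI_SUBSYSID.
	 */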
15339 	hw->subsystem_device_id = pdev->subsystem_device ?
15340 		pdev->subsystem_device :
15341 		(ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
15342 }
15343 
15344 /**
15345  * i40e_probe - Device initialization routine
15346  * @pdev: PCI device information struct
15347  * @ent: entry in i40e_pci_tbl
15348  *
15349  * i40e_probe initializes a PF identified by a pci_dev structure.
15350  * The OS initialization, configuring of the PF private structure,
15351  * and a hardware reset occur.
15352  *
15353  * Returns 0 on success, negative on failure
15354  **/
15355 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15356 {
15357 	struct i40e_aq_get_phy_abilities_resp abilities;
15358 #ifdef CONFIG_I40E_DCB
15359 	enum i40e_get_fw_lldp_status_resp lldp_status;
15360 	i40e_status status;
15361 #endif /* CONFIG_I40E_DCB */
15362 	struct i40e_pf *pf;
15363 	struct i40e_hw *hw;
15364 	static u16 pfs_found;
15365 	u16 wol_nvm_bits;
15366 	u16 link_status;
15367 	int err;
15368 	u32 val;
15369 	u32 i;
15370 
15371 	err = pci_enable_device_mem(pdev);
15372 	if (err)
15373 		return err;
15374 
15375 	/* set up for high or low dma */
15376 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
15377 	if (err) {
15378 		dev_err(&pdev->dev,
15379 			"DMA configuration failed: 0x%x\n", err);
15380 		goto err_dma;
15381 	}
15382 
15383 	/* set up pci connections */
15384 	err = pci_request_mem_regions(pdev, i40e_driver_name);
15385 	if (err) {
15386 		dev_info(&pdev->dev,
15387 			 "pci_request_selected_regions failed %d\n", err);
15388 		goto err_pci_reg;
15389 	}
15390 
15391 	pci_enable_pcie_error_reporting(pdev);
15392 	pci_set_master(pdev);
15393 
15394 	/* Now that we have a PCI connection, we need to do the
15395 	 * low level device setup.  This is primarily setting up
15396 	 * the Admin Queue structures and then querying for the
15397 	 * device's current profile information.
15398 	 */
15399 	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
15400 	if (!pf) {
15401 		err = -ENOMEM;
15402 		goto err_pf_alloc;
15403 	}
15404 	pf->next_vsi = 0;
15405 	pf->pdev = pdev;
15406 	set_bit(__I40E_DOWN, pf->state);
15407 
15408 	hw = &pf->hw;
15409 	hw->back = pf;
15410 
15411 	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
15412 				I40E_MAX_CSR_SPACE);
	/* The highest register we expect to read is
	 * I40E_GLGEN_STAT_CLEAR, so check that the BAR is at
	 * least that large before mapping it, to prevent a
	 * kernel panic.
	 */
15418 	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15419 		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15420 			pf->ioremap_len);
15421 		err = -ENOMEM;
15422 		goto err_ioremap;
15423 	}
15424 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
15425 	if (!hw->hw_addr) {
15426 		err = -EIO;
15427 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
15428 			 (unsigned int)pci_resource_start(pdev, 0),
15429 			 pf->ioremap_len, err);
15430 		goto err_ioremap;
15431 	}
15432 	hw->vendor_id = pdev->vendor;
15433 	hw->device_id = pdev->device;
15434 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
15435 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
15436 	i40e_set_subsystem_device_id(hw);
15437 	hw->bus.device = PCI_SLOT(pdev->devfn);
15438 	hw->bus.func = PCI_FUNC(pdev->devfn);
15439 	hw->bus.bus_id = pdev->bus->number;
15440 	pf->instance = pfs_found;
15441 
15442 	/* Select something other than the 802.1ad ethertype for the
15443 	 * switch to use internally and drop on ingress.
15444 	 */
15445 	hw->switch_tag = 0xffff;
15446 	hw->first_tag = ETH_P_8021AD;
15447 	hw->second_tag = ETH_P_8021Q;
15448 
15449 	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
15450 	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15451 	INIT_LIST_HEAD(&pf->ddp_old_prof);
15452 
15453 	/* set up the locks for the AQ, do this only once in probe
15454 	 * and destroy them only once in remove
15455 	 */
15456 	mutex_init(&hw->aq.asq_mutex);
15457 	mutex_init(&hw->aq.arq_mutex);
15458 
15459 	pf->msg_enable = netif_msg_init(debug,
15460 					NETIF_MSG_DRV |
15461 					NETIF_MSG_PROBE |
15462 					NETIF_MSG_LINK);
15463 	if (debug < -1)
15464 		pf->hw.debug_mask = debug;
15465 
15466 	/* do a special CORER for clearing PXE mode once at init */
15467 	if (hw->revision_id == 0 &&
15468 	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15469 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15470 		i40e_flush(hw);
15471 		msleep(200);
15472 		pf->corer_count++;
15473 
15474 		i40e_clear_pxe_mode(hw);
15475 	}
15476 
15477 	/* Reset here to make sure all is clean and to define PF 'n' */
15478 	i40e_clear_hw(hw);
15479 
15480 	err = i40e_set_mac_type(hw);
15481 	if (err) {
15482 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15483 			 err);
15484 		goto err_pf_reset;
15485 	}
15486 
15487 	err = i40e_handle_resets(pf);
15488 	if (err)
15489 		goto err_pf_reset;
15490 
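	/* Detect whether the FW is in recovery mode; if so, this is recorded
	 * in pf->state so that probe can fall back to the minimal
	 * recovery-mode setup below.
	 */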
15491 	i40e_check_recovery_mode(pf);
15492 
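	/* use the smallest admin queues in a kdump kernel to conserve memory */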
15493 	if (is_kdump_kernel()) {
15494 		hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
15495 		hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
15496 	} else {
15497 		hw->aq.num_arq_entries = I40E_AQ_LEN;
15498 		hw->aq.num_asq_entries = I40E_AQ_LEN;
15499 	}
15500 	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15501 	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15502 	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
15503 
15504 	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
15505 		 "%s-%s:misc",
15506 		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
15507 
15508 	err = i40e_init_shared_code(hw);
15509 	if (err) {
15510 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15511 			 err);
15512 		goto err_pf_reset;
15513 	}
15514 
15515 	/* set up a default setting for link flow control */
15516 	pf->hw.fc.requested_mode = I40E_FC_NONE;
15517 
15518 	err = i40e_init_adminq(hw);
15519 	if (err) {
15520 		if (err == I40E_ERR_FIRMWARE_API_VERSION)
15521 			dev_info(&pdev->dev,
15522 				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15523 				 hw->aq.api_maj_ver,
15524 				 hw->aq.api_min_ver,
15525 				 I40E_FW_API_VERSION_MAJOR,
15526 				 I40E_FW_MINOR_VERSION(hw));
15527 		else
15528 			dev_info(&pdev->dev,
15529 				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
15530 
15531 		goto err_pf_reset;
15532 	}
15533 	i40e_get_oem_version(hw);
15534 
15535 	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
15536 	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
15537 		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
15538 		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
15539 		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
15540 		 hw->subsystem_vendor_id, hw->subsystem_device_id);
15541 
15542 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
15543 	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
15544 		dev_dbg(&pdev->dev,
15545 			"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
15546 			 hw->aq.api_maj_ver,
15547 			 hw->aq.api_min_ver,
15548 			 I40E_FW_API_VERSION_MAJOR,
15549 			 I40E_FW_MINOR_VERSION(hw));
15550 	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
15551 		dev_info(&pdev->dev,
15552 			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15553 			 hw->aq.api_maj_ver,
15554 			 hw->aq.api_min_ver,
15555 			 I40E_FW_API_VERSION_MAJOR,
15556 			 I40E_FW_MINOR_VERSION(hw));
15557 
15558 	i40e_verify_eeprom(pf);
15559 
15560 	/* Rev 0 hardware was never productized */
15561 	if (hw->revision_id < 1)
15562 		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
15563 
15564 	i40e_clear_pxe_mode(hw);
15565 
15566 	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
15567 	if (err)
15568 		goto err_adminq_setup;
15569 
15570 	err = i40e_sw_init(pf);
15571 	if (err) {
15572 		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
15573 		goto err_sw_init;
15574 	}
15575 
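	/* In recovery mode only the minimal setup needed to register a netdev
	 * and run the service task is performed; the full switch setup is
	 * skipped.
	 */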
15576 	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15577 		return i40e_init_recovery_mode(pf, hw);
15578 
15579 	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
15580 				hw->func_caps.num_rx_qp, 0, 0);
15581 	if (err) {
15582 		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
15583 		goto err_init_lan_hmc;
15584 	}
15585 
15586 	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
15587 	if (err) {
15588 		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
15589 		err = -ENOENT;
15590 		goto err_configure_lan_hmc;
15591 	}
15592 
15593 	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
15594 	 * Ignore error return codes because if it was already disabled via
15595 	 * hardware settings this will fail
15596 	 */
15597 	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
15598 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
15599 		i40e_aq_stop_lldp(hw, true, false, NULL);
15600 	}
15601 
15602 	/* allow a platform config to override the HW addr */
15603 	i40e_get_platform_mac_addr(pdev, pf);
15604 
15605 	if (!is_valid_ether_addr(hw->mac.addr)) {
15606 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
15607 		err = -EIO;
15608 		goto err_mac_addr;
15609 	}
15610 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
15611 	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
15612 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
15613 	if (is_valid_ether_addr(hw->mac.port_addr))
15614 		pf->hw_features |= I40E_HW_PORT_ID_VALID;
15615 
15616 	i40e_ptp_alloc_pins(pf);
15617 	pci_set_drvdata(pdev, pf);
15618 	pci_save_state(pdev);
15619 
15620 #ifdef CONFIG_I40E_DCB
15621 	status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
	if (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
		pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
	else
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
15626 	dev_info(&pdev->dev,
15627 		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
15628 			"FW LLDP is disabled\n" :
15629 			"FW LLDP is enabled\n");
15630 
15631 	/* Enable FW to write default DCB config on link-up */
15632 	i40e_aq_set_dcb_parameters(hw, true, NULL);
15633 
15634 	err = i40e_init_pf_dcb(pf);
15635 	if (err) {
15636 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15637 		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
15638 		/* Continue without DCB enabled */
15639 	}
15640 #endif /* CONFIG_I40E_DCB */
15641 
15642 	/* set up periodic task facility */
15643 	timer_setup(&pf->service_timer, i40e_service_timer, 0);
15644 	pf->service_timer_period = HZ;
15645 
15646 	INIT_WORK(&pf->service_task, i40e_service_task);
15647 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
15648 
15649 	/* NVM bit on means WoL disabled for the port */
15650 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
15652 		pf->wol_en = false;
15653 	else
15654 		pf->wol_en = true;
15655 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15656 
15657 	/* set up the main switch operations */
15658 	i40e_determine_queue_usage(pf);
15659 	err = i40e_init_interrupt_scheme(pf);
15660 	if (err)
15661 		goto err_switch_setup;
15662 
	/* Reduce Tx and Rx queue pairs for kdump.
	 * When MSI-X is enabled, we cannot use more TC queue pairs than
	 * there are MSI-X vectors (pf->num_lan_msix). Thus
	 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
	 */
15668 	if (is_kdump_kernel())
15669 		pf->num_lan_msix = 1;
15670 
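	/* Describe the UDP tunnel port offload table (VXLAN and GENEVE) for
	 * the udp_tunnel_nic infrastructure.
	 */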
15671 	pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15672 	pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15673 	pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15674 	pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15675 	pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15676 	pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15677 						    UDP_TUNNEL_TYPE_GENEVE;
15678 
15679 	/* The number of VSIs reported by the FW is the minimum guaranteed
15680 	 * to us; HW supports far more and we share the remaining pool with
15681 	 * the other PFs. We allocate space for more than the guarantee with
15682 	 * the understanding that we might not get them all later.
15683 	 */
15684 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15685 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15686 	else
15687 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15688 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15689 		dev_warn(&pf->pdev->dev,
15690 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15691 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15692 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15693 	}
15694 
	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
15696 	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15697 			  GFP_KERNEL);
15698 	if (!pf->vsi) {
15699 		err = -ENOMEM;
15700 		goto err_switch_setup;
15701 	}
15702 
15703 #ifdef CONFIG_PCI_IOV
15704 	/* prep for VF support */
15705 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15706 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15707 	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15708 		if (pci_num_vf(pdev))
15709 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15710 	}
15711 #endif
15712 	err = i40e_setup_pf_switch(pf, false, false);
15713 	if (err) {
15714 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15715 		goto err_vsis;
15716 	}
15717 	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15718 
15719 	/* if FDIR VSI was set up, start it now */
15720 	for (i = 0; i < pf->num_alloc_vsi; i++) {
15721 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15722 			i40e_vsi_open(pf->vsi[i]);
15723 			break;
15724 		}
15725 	}
15726 
15727 	/* The driver only wants link up/down and module qualification
15728 	 * reports from firmware.  Note the negative logic.
15729 	 */
15730 	err = i40e_aq_set_phy_int_mask(&pf->hw,
15731 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
15732 					 I40E_AQ_EVENT_MEDIA_NA |
15733 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15734 	if (err)
15735 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15736 			 i40e_stat_str(&pf->hw, err),
15737 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15738 
15739 	/* Reconfigure hardware for allowing smaller MSS in the case
15740 	 * of TSO, so that we avoid the MDD being fired and causing
15741 	 * a reset in the case of small MSS+TSO.
15742 	 */
15743 	val = rd32(hw, I40E_REG_MSS);
15744 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15745 		val &= ~I40E_REG_MSS_MIN_MASK;
15746 		val |= I40E_64BYTE_MSS;
15747 		wr32(hw, I40E_REG_MSS, val);
15748 	}
15749 
15750 	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15751 		msleep(75);
15752 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15753 		if (err)
15754 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15755 				 i40e_stat_str(&pf->hw, err),
15756 				 i40e_aq_str(&pf->hw,
15757 					     pf->hw.aq.asq_last_status));
15758 	}
15759 	/* The main driver is (mostly) up and happy. We need to set this state
15760 	 * before setting up the misc vector or we get a race and the vector
15761 	 * ends up disabled forever.
15762 	 */
15763 	clear_bit(__I40E_DOWN, pf->state);
15764 
15765 	/* In case of MSIX we are going to setup the misc vector right here
15766 	 * to handle admin queue events etc. In case of legacy and MSI
15767 	 * the misc functionality and queue processing is combined in
15768 	 * the same vector and that gets setup at open.
15769 	 */
15770 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15771 		err = i40e_setup_misc_vector(pf);
15772 		if (err) {
15773 			dev_info(&pdev->dev,
15774 				 "setup of misc vector failed: %d\n", err);
15775 			i40e_cloud_filter_exit(pf);
15776 			i40e_fdir_teardown(pf);
15777 			goto err_vsis;
15778 		}
15779 	}
15780 
15781 #ifdef CONFIG_PCI_IOV
15782 	/* prep for VF support */
15783 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15784 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15785 	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15786 		/* disable link interrupts for VFs */
15787 		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15788 		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15789 		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15790 		i40e_flush(hw);
15791 
15792 		if (pci_num_vf(pdev)) {
15793 			dev_info(&pdev->dev,
15794 				 "Active VFs found, allocating resources.\n");
15795 			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15796 			if (err)
15797 				dev_info(&pdev->dev,
15798 					 "Error %d allocating resources for existing VFs\n",
15799 					 err);
15800 		}
15801 	}
15802 #endif /* CONFIG_PCI_IOV */
15803 
15804 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15805 		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
15806 						      pf->num_iwarp_msix,
15807 						      I40E_IWARP_IRQ_PILE_ID);
15808 		if (pf->iwarp_base_vector < 0) {
15809 			dev_info(&pdev->dev,
15810 				 "failed to get tracking for %d vectors for IWARP err=%d\n",
15811 				 pf->num_iwarp_msix, pf->iwarp_base_vector);
15812 			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
15813 		}
15814 	}
15815 
15816 	i40e_dbg_pf_init(pf);
15817 
15818 	/* tell the firmware that we're starting */
15819 	i40e_send_version(pf);
15820 
15821 	/* since everything's happy, start the service_task timer */
15822 	mod_timer(&pf->service_timer,
15823 		  round_jiffies(jiffies + pf->service_timer_period));
15824 
15825 	/* add this PF to client device list and launch a client service task */
15826 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15827 		err = i40e_lan_add_device(pf);
15828 		if (err)
15829 			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
15830 				 err);
15831 	}
15832 
15833 #define PCI_SPEED_SIZE 8
15834 #define PCI_WIDTH_SIZE 8
15835 	/* Devices on the IOSF bus do not have this information
15836 	 * and will report PCI Gen 1 x 1 by default so don't bother
15837 	 * checking them.
15838 	 */
15839 	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
15840 		char speed[PCI_SPEED_SIZE] = "Unknown";
15841 		char width[PCI_WIDTH_SIZE] = "Unknown";
15842 
15843 		/* Get the negotiated link width and speed from PCI config
15844 		 * space
15845 		 */
15846 		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
15847 					  &link_status);
15848 
15849 		i40e_set_pci_config_data(hw, link_status);
15850 
15851 		switch (hw->bus.speed) {
15852 		case i40e_bus_speed_8000:
15853 			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
15854 		case i40e_bus_speed_5000:
15855 			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
15856 		case i40e_bus_speed_2500:
15857 			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
15858 		default:
15859 			break;
15860 		}
15861 		switch (hw->bus.width) {
15862 		case i40e_bus_width_pcie_x8:
15863 			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
15864 		case i40e_bus_width_pcie_x4:
15865 			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
15866 		case i40e_bus_width_pcie_x2:
15867 			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
15868 		case i40e_bus_width_pcie_x1:
15869 			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
15870 		default:
15871 			break;
15872 		}
15873 
15874 		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
15875 			 speed, width);
15876 
15877 		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
15878 		    hw->bus.speed < i40e_bus_speed_8000) {
15879 			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
15880 			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
15881 		}
15882 	}
15883 
15884 	/* get the requested speeds from the fw */
15885 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
15886 	if (err)
15887 		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
15888 			i40e_stat_str(&pf->hw, err),
15889 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15890 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
15891 
15892 	/* set the FEC config due to the board capabilities */
15893 	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15894 
15895 	/* get the supported phy types from the fw */
15896 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
15897 	if (err)
15898 		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
15899 			i40e_stat_str(&pf->hw, err),
15900 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15901 
15902 	/* make sure the MFS hasn't been set lower than the default */
15903 #define MAX_FRAME_SIZE_DEFAULT 0x2600
15904 	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
15905 	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
15906 	if (val < MAX_FRAME_SIZE_DEFAULT)
		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
			 pf->hw.port, val);
15909 
15910 	/* Add a filter to drop all Flow control frames from any VSI from being
15911 	 * transmitted. By doing so we stop a malicious VF from sending out
15912 	 * PAUSE or PFC frames and potentially controlling traffic for other
15913 	 * PF/VF VSIs.
15914 	 * The FW can still send Flow control frames if enabled.
15915 	 */
15916 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
15917 						       pf->main_vsi_seid);
15918 
15919 	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
15920 		(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
15921 		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
15922 	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
15923 		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
15924 	/* print a string summarizing features */
15925 	i40e_print_features(pf);
15926 
15927 	return 0;
15928 
15929 	/* Unwind what we've done if something failed in the setup */
15930 err_vsis:
15931 	set_bit(__I40E_DOWN, pf->state);
15932 	i40e_clear_interrupt_scheme(pf);
15933 	kfree(pf->vsi);
15934 err_switch_setup:
15935 	i40e_reset_interrupt_capability(pf);
15936 	del_timer_sync(&pf->service_timer);
15937 err_mac_addr:
15938 err_configure_lan_hmc:
15939 	(void)i40e_shutdown_lan_hmc(hw);
15940 err_init_lan_hmc:
15941 	kfree(pf->qp_pile);
15942 err_sw_init:
15943 err_adminq_setup:
15944 err_pf_reset:
15945 	iounmap(hw->hw_addr);
15946 err_ioremap:
15947 	kfree(pf);
15948 err_pf_alloc:
15949 	pci_disable_pcie_error_reporting(pdev);
15950 	pci_release_mem_regions(pdev);
15951 err_pci_reg:
15952 err_dma:
15953 	pci_disable_device(pdev);
15954 	return err;
15955 }
15956 
15957 /**
15958  * i40e_remove - Device removal routine
15959  * @pdev: PCI device information struct
15960  *
15961  * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
15963  * Hot-Plug event, or because the driver is going to be removed from
15964  * memory.
15965  **/
15966 static void i40e_remove(struct pci_dev *pdev)
15967 {
15968 	struct i40e_pf *pf = pci_get_drvdata(pdev);
15969 	struct i40e_hw *hw = &pf->hw;
15970 	i40e_status ret_code;
15971 	int i;
15972 
15973 	i40e_dbg_pf_exit(pf);
15974 
15975 	i40e_ptp_stop(pf);
15976 
15977 	/* Disable RSS in hw */
15978 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
15979 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
15980 
	/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
	 * flags; once they are set, i40e_rebuild should not be called as
	 * i40e_prep_for_reset always returns early.
	 */
15985 	while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
15986 		usleep_range(1000, 2000);
15987 	set_bit(__I40E_IN_REMOVE, pf->state);
15988 
15989 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
15990 		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
15991 		i40e_free_vfs(pf);
15992 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
15993 	}
15994 	/* no more scheduling of any task */
15995 	set_bit(__I40E_SUSPENDED, pf->state);
15996 	set_bit(__I40E_DOWN, pf->state);
15997 	if (pf->service_timer.function)
15998 		del_timer_sync(&pf->service_timer);
15999 	if (pf->service_task.func)
16000 		cancel_work_sync(&pf->service_task);
16001 
16002 	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
16003 		struct i40e_vsi *vsi = pf->vsi[0];
16004 
		/* We know that we have allocated only one VSI for this PF;
		 * it exists just to register the netdevice so that the
		 * interface is visible in the 'ifconfig' output.
		 */
16009 		unregister_netdev(vsi->netdev);
16010 		free_netdev(vsi->netdev);
16011 
16012 		goto unmap;
16013 	}
16014 
16015 	/* Client close must be called explicitly here because the timer
16016 	 * has been stopped.
16017 	 */
16018 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16019 
16020 	i40e_fdir_teardown(pf);
16021 
16022 	/* If there is a switch structure or any orphans, remove them.
16023 	 * This will leave only the PF's VSI remaining.
16024 	 */
16025 	for (i = 0; i < I40E_MAX_VEB; i++) {
16026 		if (!pf->veb[i])
16027 			continue;
16028 
16029 		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
16030 		    pf->veb[i]->uplink_seid == 0)
16031 			i40e_switch_branch_release(pf->veb[i]);
16032 	}
16033 
16034 	/* Now we can shutdown the PF's VSI, just before we kill
16035 	 * adminq and hmc.
16036 	 */
16037 	if (pf->vsi[pf->lan_vsi])
16038 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
16039 
16040 	i40e_cloud_filter_exit(pf);
16041 
16042 	/* remove attached clients */
16043 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16044 		ret_code = i40e_lan_del_device(pf);
16045 		if (ret_code)
16046 			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
16047 				 ret_code);
16048 	}
16049 
16050 	/* shutdown and destroy the HMC */
16051 	if (hw->hmc.hmc_obj) {
16052 		ret_code = i40e_shutdown_lan_hmc(hw);
16053 		if (ret_code)
16054 			dev_warn(&pdev->dev,
16055 				 "Failed to destroy the HMC resources: %d\n",
16056 				 ret_code);
16057 	}
16058 
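/* teardown common to normal removal and the recovery-mode path above */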
16059 unmap:
16060 	/* Free MSI/legacy interrupt 0 when in recovery mode. */
16061 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16062 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16063 		free_irq(pf->pdev->irq, pf);
16064 
16065 	/* shutdown the adminq */
16066 	i40e_shutdown_adminq(hw);
16067 
16068 	/* destroy the locks only once, here */
16069 	mutex_destroy(&hw->aq.arq_mutex);
16070 	mutex_destroy(&hw->aq.asq_mutex);
16071 
16072 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
16073 	rtnl_lock();
16074 	i40e_clear_interrupt_scheme(pf);
16075 	for (i = 0; i < pf->num_alloc_vsi; i++) {
16076 		if (pf->vsi[i]) {
16077 			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
16078 				i40e_vsi_clear_rings(pf->vsi[i]);
16079 			i40e_vsi_clear(pf->vsi[i]);
16080 			pf->vsi[i] = NULL;
16081 		}
16082 	}
16083 	rtnl_unlock();
16084 
16085 	for (i = 0; i < I40E_MAX_VEB; i++) {
16086 		kfree(pf->veb[i]);
16087 		pf->veb[i] = NULL;
16088 	}
16089 
16090 	kfree(pf->qp_pile);
16091 	kfree(pf->vsi);
16092 
16093 	iounmap(hw->hw_addr);
16094 	kfree(pf);
16095 	pci_release_mem_regions(pdev);
16096 
16097 	pci_disable_pcie_error_reporting(pdev);
16098 	pci_disable_device(pdev);
16099 }
16100 
16101 /**
16102  * i40e_pci_error_detected - warning that something funky happened in PCI land
16103  * @pdev: PCI device information struct
16104  * @error: the type of PCI error
16105  *
16106  * Called to warn that something happened and the error handling steps
16107  * are in progress.  Allows the driver to quiesce things, be ready for
16108  * remediation.
16109  **/
16110 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
16111 						pci_channel_state_t error)
16112 {
16113 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16114 
16115 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
16116 
16117 	if (!pf) {
16118 		dev_info(&pdev->dev,
16119 			 "Cannot recover - error happened during device probe\n");
16120 		return PCI_ERS_RESULT_DISCONNECT;
16121 	}
16122 
16123 	/* shutdown all operations */
16124 	if (!test_bit(__I40E_SUSPENDED, pf->state))
16125 		i40e_prep_for_reset(pf);
16126 
16127 	/* Request a slot reset */
16128 	return PCI_ERS_RESULT_NEED_RESET;
16129 }
16130 
16131 /**
16132  * i40e_pci_error_slot_reset - a PCI slot reset just happened
16133  * @pdev: PCI device information struct
16134  *
16135  * Called to find if the driver can work with the device now that
16136  * the pci slot has been reset.  If a basic connection seems good
16137  * (registers are readable and have sane content) then return a
16138  * happy little PCI_ERS_RESULT_xxx.
16139  **/
16140 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
16141 {
16142 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16143 	pci_ers_result_t result;
16144 	u32 reg;
16145 
16146 	dev_dbg(&pdev->dev, "%s\n", __func__);
16147 	if (pci_enable_device_mem(pdev)) {
16148 		dev_info(&pdev->dev,
16149 			 "Cannot re-enable PCI device after reset.\n");
16150 		result = PCI_ERS_RESULT_DISCONNECT;
16151 	} else {
16152 		pci_set_master(pdev);
16153 		pci_restore_state(pdev);
16154 		pci_save_state(pdev);
16155 		pci_wake_from_d3(pdev, false);
16156 
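		/* A healthy device reads back zero from the reset trigger
		 * register; anything else means recovery did not succeed.
		 */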
16157 		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
16158 		if (reg == 0)
16159 			result = PCI_ERS_RESULT_RECOVERED;
16160 		else
16161 			result = PCI_ERS_RESULT_DISCONNECT;
16162 	}
16163 
16164 	return result;
16165 }
16166 
16167 /**
16168  * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16169  * @pdev: PCI device information struct
16170  */
16171 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
16172 {
16173 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16174 
16175 	i40e_prep_for_reset(pf);
16176 }
16177 
16178 /**
16179  * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16180  * @pdev: PCI device information struct
16181  */
16182 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
16183 {
16184 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16185 
16186 	if (test_bit(__I40E_IN_REMOVE, pf->state))
16187 		return;
16188 
16189 	i40e_reset_and_rebuild(pf, false, false);
16190 }
16191 
16192 /**
16193  * i40e_pci_error_resume - restart operations after PCI error recovery
16194  * @pdev: PCI device information struct
16195  *
16196  * Called to allow the driver to bring things back up after PCI error
16197  * and/or reset recovery has finished.
16198  **/
16199 static void i40e_pci_error_resume(struct pci_dev *pdev)
16200 {
16201 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16202 
16203 	dev_dbg(&pdev->dev, "%s\n", __func__);
16204 	if (test_bit(__I40E_SUSPENDED, pf->state))
16205 		return;
16206 
16207 	i40e_handle_reset_warning(pf, false);
16208 }
16209 
16210 /**
16211  * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16212  * using the mac_address_write admin q function
16213  * @pf: pointer to i40e_pf struct
16214  **/
16215 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
16216 {
16217 	struct i40e_hw *hw = &pf->hw;
16218 	i40e_status ret;
	u8 mac_addr[ETH_ALEN];
16220 	u16 flags = 0;
16221 
16222 	/* Get current MAC address in case it's an LAA */
16223 	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
16224 		ether_addr_copy(mac_addr,
16225 				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
16226 	} else {
16227 		dev_err(&pf->pdev->dev,
16228 			"Failed to retrieve MAC address; using default\n");
16229 		ether_addr_copy(mac_addr, hw->mac.addr);
16230 	}
16231 
16232 	/* The FW expects the mac address write cmd to first be called with
16233 	 * one of these flags before calling it again with the multicast
16234 	 * enable flags.
16235 	 */
16236 	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
16237 
16238 	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
16239 		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
16240 
16241 	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16242 	if (ret) {
16243 		dev_err(&pf->pdev->dev,
16244 			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
16245 		return;
16246 	}
16247 
16248 	flags = I40E_AQC_MC_MAG_EN
16249 			| I40E_AQC_WOL_PRESERVE_ON_PFR
16250 			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
16251 	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16252 	if (ret)
16253 		dev_err(&pf->pdev->dev,
16254 			"Failed to enable Multicast Magic Packet wake up\n");
16255 }
16256 
16257 /**
16258  * i40e_shutdown - PCI callback for shutting down
16259  * @pdev: PCI device information struct
16260  **/
16261 static void i40e_shutdown(struct pci_dev *pdev)
16262 {
16263 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16264 	struct i40e_hw *hw = &pf->hw;
16265 
16266 	set_bit(__I40E_SUSPENDED, pf->state);
16267 	set_bit(__I40E_DOWN, pf->state);
16268 
16269 	del_timer_sync(&pf->service_timer);
16270 	cancel_work_sync(&pf->service_task);
16271 	i40e_cloud_filter_exit(pf);
16272 	i40e_fdir_teardown(pf);
16273 
16274 	/* Client close must be called explicitly here because the timer
16275 	 * has been stopped.
16276 	 */
16277 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16278 
16279 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16280 		i40e_enable_mc_magic_wake(pf);
16281 
16282 	i40e_prep_for_reset(pf);
16283 
16284 	wr32(hw, I40E_PFPM_APM,
16285 	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16286 	wr32(hw, I40E_PFPM_WUFC,
16287 	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16288 
16289 	/* Free MSI/legacy interrupt 0 when in recovery mode. */
16290 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16291 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16292 		free_irq(pf->pdev->irq, pf);
16293 
16294 	/* Since we're going to destroy queues during the
16295 	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16296 	 * whole section
16297 	 */
16298 	rtnl_lock();
16299 	i40e_clear_interrupt_scheme(pf);
16300 	rtnl_unlock();
16301 
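	/* configure wake-up and drop to D3hot only when the system is
	 * actually powering off, not on a reboot
	 */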
16302 	if (system_state == SYSTEM_POWER_OFF) {
16303 		pci_wake_from_d3(pdev, pf->wol_en);
16304 		pci_set_power_state(pdev, PCI_D3hot);
16305 	}
16306 }
16307 
16308 /**
16309  * i40e_suspend - PM callback for moving to D3
16310  * @dev: generic device information structure
16311  **/
16312 static int __maybe_unused i40e_suspend(struct device *dev)
16313 {
16314 	struct i40e_pf *pf = dev_get_drvdata(dev);
16315 	struct i40e_hw *hw = &pf->hw;
16316 
16317 	/* If we're already suspended, then there is nothing to do */
16318 	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
16319 		return 0;
16320 
16321 	set_bit(__I40E_DOWN, pf->state);
16322 
16323 	/* Ensure service task will not be running */
16324 	del_timer_sync(&pf->service_timer);
16325 	cancel_work_sync(&pf->service_task);
16326 
16327 	/* Client close must be called explicitly here because the timer
16328 	 * has been stopped.
16329 	 */
16330 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16331 
16332 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16333 		i40e_enable_mc_magic_wake(pf);
16334 
16335 	/* Since we're going to destroy queues during the
16336 	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16337 	 * whole section
16338 	 */
16339 	rtnl_lock();
16340 
16341 	i40e_prep_for_reset(pf);
16342 
16343 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16344 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16345 
16346 	/* Clear the interrupt scheme and release our IRQs so that the system
16347 	 * can safely hibernate even when there are a large number of CPUs.
16348 	 * Otherwise hibernation might fail when mapping all the vectors back
16349 	 * to CPU0.
16350 	 */
16351 	i40e_clear_interrupt_scheme(pf);
16352 
16353 	rtnl_unlock();
16354 
16355 	return 0;
16356 }
16357 
16358 /**
16359  * i40e_resume - PM callback for waking up from D3
16360  * @dev: generic device information structure
16361  **/
16362 static int __maybe_unused i40e_resume(struct device *dev)
16363 {
16364 	struct i40e_pf *pf = dev_get_drvdata(dev);
16365 	int err;
16366 
16367 	/* If we're not suspended, then there is nothing to do */
16368 	if (!test_bit(__I40E_SUSPENDED, pf->state))
16369 		return 0;
16370 
16371 	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
16372 	 * since we're going to be restoring queues
16373 	 */
16374 	rtnl_lock();
16375 
16376 	/* We cleared the interrupt scheme when we suspended, so we need to
16377 	 * restore it now to resume device functionality.
16378 	 */
16379 	err = i40e_restore_interrupt_scheme(pf);
16380 	if (err) {
16381 		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
16382 			err);
16383 	}
16384 
16385 	clear_bit(__I40E_DOWN, pf->state);
16386 	i40e_reset_and_rebuild(pf, false, true);
16387 
16388 	rtnl_unlock();
16389 
16390 	/* Clear suspended state last after everything is recovered */
16391 	clear_bit(__I40E_SUSPENDED, pf->state);
16392 
16393 	/* Restart the service task */
16394 	mod_timer(&pf->service_timer,
16395 		  round_jiffies(jiffies + pf->service_timer_period));
16396 
16397 	return 0;
16398 }
16399 
16400 static const struct pci_error_handlers i40e_err_handler = {
16401 	.error_detected = i40e_pci_error_detected,
16402 	.slot_reset = i40e_pci_error_slot_reset,
16403 	.reset_prepare = i40e_pci_error_reset_prepare,
16404 	.reset_done = i40e_pci_error_reset_done,
16405 	.resume = i40e_pci_error_resume,
16406 };
16407 
16408 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
16409 
16410 static struct pci_driver i40e_driver = {
16411 	.name     = i40e_driver_name,
16412 	.id_table = i40e_pci_tbl,
16413 	.probe    = i40e_probe,
16414 	.remove   = i40e_remove,
16415 	.driver   = {
16416 		.pm = &i40e_pm_ops,
16417 	},
16418 	.shutdown = i40e_shutdown,
16419 	.err_handler = &i40e_err_handler,
16420 	.sriov_configure = i40e_pci_sriov_configure,
16421 };
16422 
16423 /**
16424  * i40e_init_module - Driver registration routine
16425  *
16426  * i40e_init_module is the first routine called when the driver is
16427  * loaded. All it does is register with the PCI subsystem.
16428  **/
16429 static int __init i40e_init_module(void)
16430 {
16431 	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
16432 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
16433 
16434 	/* There is no need to throttle the number of active tasks because
16435 	 * each device limits its own task using a state bit for scheduling
16436 	 * the service task, and the device tasks do not interfere with each
16437 	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
16438 	 * since we need to be able to guarantee forward progress even under
16439 	 * memory pressure.
16440 	 */
16441 	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
16442 	if (!i40e_wq) {
16443 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
16444 		return -ENOMEM;
16445 	}
16446 
16447 	i40e_dbg_init();
16448 	return pci_register_driver(&i40e_driver);
16449 }
16450 module_init(i40e_init_module);
16451 
16452 /**
16453  * i40e_exit_module - Driver exit cleanup routine
16454  *
16455  * i40e_exit_module is called just before the driver is removed
16456  * from memory.
16457  **/
16458 static void __exit i40e_exit_module(void)
16459 {
16460 	pci_unregister_driver(&i40e_driver);
16461 	destroy_workqueue(i40e_wq);
16462 	ida_destroy(&i40e_client_ida);
16463 	i40e_dbg_exit();
16464 }
16465 module_exit(i40e_exit_module);
16466