1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
17  * ice tracepoint functions. This must be done exactly once across the
18  * ice driver.
19  */
20 #define CREATE_TRACE_POINTS
21 #include "ice_trace.h"
22 #include "ice_eswitch.h"
23 #include "ice_tc_lib.h"
24 
25 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
26 static const char ice_driver_string[] = DRV_SUMMARY;
27 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
28 
29 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
30 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
31 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
32 
33 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
34 MODULE_DESCRIPTION(DRV_SUMMARY);
35 MODULE_LICENSE("GPL v2");
36 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
37 
38 static int debug = -1;
39 module_param(debug, int, 0644);
40 #ifndef CONFIG_DYNAMIC_DEBUG
41 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
42 #else
43 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
44 #endif /* !CONFIG_DYNAMIC_DEBUG */
45 
46 static DEFINE_IDA(ice_aux_ida);
47 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
48 EXPORT_SYMBOL(ice_xdp_locking_key);
49 
50 static struct workqueue_struct *ice_wq;
51 static const struct net_device_ops ice_netdev_safe_mode_ops;
52 static const struct net_device_ops ice_netdev_ops;
53 
54 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
55 
56 static void ice_vsi_release_all(struct ice_pf *pf);
57 
58 bool netif_is_ice(struct net_device *dev)
59 {
60 	return dev && (dev->netdev_ops == &ice_netdev_ops);
61 }
62 
63 /**
64  * ice_get_tx_pending - returns number of Tx descriptors not processed
65  * @ring: the ring of descriptors
66  */
67 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
68 {
69 	u16 head, tail;
70 
71 	head = ring->next_to_clean;
72 	tail = ring->next_to_use;
73 
74 	if (head != tail)
75 		return (head < tail) ?
76 			tail - head : (tail + ring->count - head);
77 	return 0;
78 }
79 
80 /**
81  * ice_check_for_hang_subtask - check for and recover hung queues
82  * @pf: pointer to PF struct
83  */
84 static void ice_check_for_hang_subtask(struct ice_pf *pf)
85 {
86 	struct ice_vsi *vsi = NULL;
87 	struct ice_hw *hw;
88 	unsigned int i;
89 	int packets;
90 	u32 v;
91 
92 	ice_for_each_vsi(pf, v)
93 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
94 			vsi = pf->vsi[v];
95 			break;
96 		}
97 
98 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
99 		return;
100 
101 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
102 		return;
103 
104 	hw = &vsi->back->hw;
105 
106 	ice_for_each_txq(vsi, i) {
107 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
108 
109 		if (tx_ring && tx_ring->desc) {
110 			/* If packet counter has not changed the queue is
111 			 * likely stalled, so force an interrupt for this
112 			 * queue.
113 			 *
114 			 * prev_pkt would be negative if there was no
115 			 * pending work.
116 			 */
117 			packets = tx_ring->stats.pkts & INT_MAX;
118 			if (tx_ring->tx_stats.prev_pkt == packets) {
119 				/* Trigger sw interrupt to revive the queue */
120 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
121 				continue;
122 			}
123 
124 			/* Memory barrier between read of packet count and call
125 			 * to ice_get_tx_pending()
126 			 */
127 			smp_rmb();
128 			tx_ring->tx_stats.prev_pkt =
129 			    ice_get_tx_pending(tx_ring) ? packets : -1;
130 		}
131 	}
132 }
133 
134 /**
135  * ice_init_mac_fltr - Set initial MAC filters
136  * @pf: board private structure
137  *
138  * Set initial set of MAC filters for PF VSI; configure filters for permanent
139  * address and broadcast address. If an error is encountered, netdevice will be
140  * unregistered.
141  */
142 static int ice_init_mac_fltr(struct ice_pf *pf)
143 {
144 	enum ice_status status;
145 	struct ice_vsi *vsi;
146 	u8 *perm_addr;
147 
148 	vsi = ice_get_main_vsi(pf);
149 	if (!vsi)
150 		return -EINVAL;
151 
152 	perm_addr = vsi->port_info->mac.perm_addr;
153 	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
154 	if (status)
155 		return -EIO;
156 
157 	return 0;
158 }
159 
160 /**
161  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
162  * @netdev: the net device on which the sync is happening
163  * @addr: MAC address to sync
164  *
165  * This is a callback function which is called by the in kernel device sync
166  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
167  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
168  * MAC filters from the hardware.
169  */
170 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
171 {
172 	struct ice_netdev_priv *np = netdev_priv(netdev);
173 	struct ice_vsi *vsi = np->vsi;
174 
175 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
176 				     ICE_FWD_TO_VSI))
177 		return -EINVAL;
178 
179 	return 0;
180 }
181 
182 /**
183  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
184  * @netdev: the net device on which the unsync is happening
185  * @addr: MAC address to unsync
186  *
187  * This is a callback function which is called by the in kernel device unsync
188  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
189  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
190  * delete the MAC filters from the hardware.
191  */
192 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
193 {
194 	struct ice_netdev_priv *np = netdev_priv(netdev);
195 	struct ice_vsi *vsi = np->vsi;
196 
197 	/* Under some circumstances, we might receive a request to delete our
198 	 * own device address from our uc list. Because we store the device
199 	 * address in the VSI's MAC filter list, we need to ignore such
200 	 * requests and not delete our device address from this list.
201 	 */
202 	if (ether_addr_equal(addr, netdev->dev_addr))
203 		return 0;
204 
205 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
206 				     ICE_FWD_TO_VSI))
207 		return -EINVAL;
208 
209 	return 0;
210 }
211 
212 /**
213  * ice_vsi_fltr_changed - check if filter state changed
214  * @vsi: VSI to be checked
215  *
216  * returns true if filter state has changed, false otherwise.
217  */
218 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
219 {
220 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
221 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
222 	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
223 }
224 
225 /**
226  * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
227  * @vsi: the VSI being configured
228  * @promisc_m: mask of promiscuous config bits
229  * @set_promisc: enable or disable promisc flag request
230  *
231  */
232 static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
233 {
234 	struct ice_hw *hw = &vsi->back->hw;
235 	enum ice_status status = 0;
236 
237 	if (vsi->type != ICE_VSI_PF)
238 		return 0;
239 
240 	if (vsi->num_vlan > 1) {
241 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
242 						  set_promisc);
243 	} else {
244 		if (set_promisc)
245 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
246 						     0);
247 		else
248 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
249 						       0);
250 	}
251 
252 	if (status)
253 		return -EIO;
254 
255 	return 0;
256 }
257 
258 /**
259  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
260  * @vsi: ptr to the VSI
261  *
262  * Push any outstanding VSI filter changes through the AdminQ.
263  */
264 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
265 {
266 	struct device *dev = ice_pf_to_dev(vsi->back);
267 	struct net_device *netdev = vsi->netdev;
268 	bool promisc_forced_on = false;
269 	struct ice_pf *pf = vsi->back;
270 	struct ice_hw *hw = &pf->hw;
271 	enum ice_status status = 0;
272 	u32 changed_flags = 0;
273 	u8 promisc_m;
274 	int err = 0;
275 
276 	if (!vsi->netdev)
277 		return -EINVAL;
278 
279 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
280 		usleep_range(1000, 2000);
281 
282 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
283 	vsi->current_netdev_flags = vsi->netdev->flags;
284 
285 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
286 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
287 
288 	if (ice_vsi_fltr_changed(vsi)) {
289 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
290 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
291 		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
292 
293 		/* grab the netdev's addr_list_lock */
294 		netif_addr_lock_bh(netdev);
295 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
296 			      ice_add_mac_to_unsync_list);
297 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
298 			      ice_add_mac_to_unsync_list);
299 		/* our temp lists are populated. release lock */
300 		netif_addr_unlock_bh(netdev);
301 	}
302 
303 	/* Remove MAC addresses in the unsync list */
304 	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
305 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
306 	if (status) {
307 		netdev_err(netdev, "Failed to delete MAC filters\n");
308 		/* if we failed because of alloc failures, just bail */
309 		if (status == ICE_ERR_NO_MEMORY) {
310 			err = -ENOMEM;
311 			goto out;
312 		}
313 	}
314 
315 	/* Add MAC addresses in the sync list */
316 	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
317 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
318 	/* If filter is added successfully or already exists, do not go into
319 	 * 'if' condition and report it as error. Instead continue processing
320 	 * rest of the function.
321 	 */
322 	if (status && status != ICE_ERR_ALREADY_EXISTS) {
323 		netdev_err(netdev, "Failed to add MAC filters\n");
324 		/* If there is no more space for new umac filters, VSI
325 		 * should go into promiscuous mode. There should be some
326 		 * space reserved for promiscuous filters.
327 		 */
328 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
329 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
330 				      vsi->state)) {
331 			promisc_forced_on = true;
332 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
333 				    vsi->vsi_num);
334 		} else {
335 			err = -EIO;
336 			goto out;
337 		}
338 	}
339 	/* check for changes in promiscuous modes */
340 	if (changed_flags & IFF_ALLMULTI) {
341 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
342 			if (vsi->num_vlan > 1)
343 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
344 			else
345 				promisc_m = ICE_MCAST_PROMISC_BITS;
346 
347 			err = ice_cfg_promisc(vsi, promisc_m, true);
348 			if (err) {
349 				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
350 					   vsi->vsi_num);
351 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
352 				goto out_promisc;
353 			}
354 		} else {
355 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
356 			if (vsi->num_vlan > 1)
357 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
358 			else
359 				promisc_m = ICE_MCAST_PROMISC_BITS;
360 
361 			err = ice_cfg_promisc(vsi, promisc_m, false);
362 			if (err) {
363 				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
364 					   vsi->vsi_num);
365 				vsi->current_netdev_flags |= IFF_ALLMULTI;
366 				goto out_promisc;
367 			}
368 		}
369 	}
370 
371 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
372 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
373 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
374 		if (vsi->current_netdev_flags & IFF_PROMISC) {
375 			/* Apply Rx filter rule to get traffic from wire */
376 			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
377 				err = ice_set_dflt_vsi(pf->first_sw, vsi);
378 				if (err && err != -EEXIST) {
379 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
380 						   err, vsi->vsi_num);
381 					vsi->current_netdev_flags &=
382 						~IFF_PROMISC;
383 					goto out_promisc;
384 				}
385 				ice_cfg_vlan_pruning(vsi, false, false);
386 			}
387 		} else {
388 			/* Clear Rx filter to remove traffic from wire */
389 			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
390 				err = ice_clear_dflt_vsi(pf->first_sw);
391 				if (err) {
392 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
393 						   err, vsi->vsi_num);
394 					vsi->current_netdev_flags |=
395 						IFF_PROMISC;
396 					goto out_promisc;
397 				}
398 				if (vsi->num_vlan > 1)
399 					ice_cfg_vlan_pruning(vsi, true, false);
400 			}
401 		}
402 	}
403 	goto exit;
404 
405 out_promisc:
406 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
407 	goto exit;
408 out:
409 	/* if something went wrong then set the changed flag so we try again */
410 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
411 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
412 exit:
413 	clear_bit(ICE_CFG_BUSY, vsi->state);
414 	return err;
415 }
416 
417 /**
418  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
419  * @pf: board private structure
420  */
421 static void ice_sync_fltr_subtask(struct ice_pf *pf)
422 {
423 	int v;
424 
425 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
426 		return;
427 
428 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
429 
430 	ice_for_each_vsi(pf, v)
431 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
432 		    ice_vsi_sync_fltr(pf->vsi[v])) {
433 			/* come back and try again later */
434 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
435 			break;
436 		}
437 }
438 
439 /**
440  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
441  * @pf: the PF
442  * @locked: is the rtnl_lock already held
443  */
444 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
445 {
446 	int node;
447 	int v;
448 
449 	ice_for_each_vsi(pf, v)
450 		if (pf->vsi[v])
451 			ice_dis_vsi(pf->vsi[v], locked);
452 
453 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
454 		pf->pf_agg_node[node].num_vsis = 0;
455 
456 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
457 		pf->vf_agg_node[node].num_vsis = 0;
458 }
459 
460 /**
461  * ice_prepare_for_reset - prep for the core to reset
462  * @pf: board private structure
463  *
464  * Inform or close all dependent features in prep for reset.
465  */
466 static void
467 ice_prepare_for_reset(struct ice_pf *pf)
468 {
469 	struct ice_hw *hw = &pf->hw;
470 	unsigned int i;
471 
472 	/* already prepared for reset */
473 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
474 		return;
475 
476 	ice_unplug_aux_dev(pf);
477 
478 	/* Notify VFs of impending reset */
479 	if (ice_check_sq_alive(hw, &hw->mailboxq))
480 		ice_vc_notify_reset(pf);
481 
482 	/* Disable VFs until reset is completed */
483 	ice_for_each_vf(pf, i)
484 		ice_set_vf_state_qs_dis(&pf->vf[i]);
485 
486 	/* clear SW filtering DB */
487 	ice_clear_hw_tbls(hw);
488 	/* disable the VSIs and their queues that are not already DOWN */
489 	ice_pf_dis_all_vsi(pf, false);
490 
491 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
492 		ice_ptp_release(pf);
493 
494 	if (hw->port_info)
495 		ice_sched_clear_port(hw->port_info);
496 
497 	ice_shutdown_all_ctrlq(hw);
498 
499 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
500 }
501 
502 /**
503  * ice_do_reset - Initiate one of many types of resets
504  * @pf: board private structure
505  * @reset_type: reset type requested
506  * before this function was called.
507  */
508 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
509 {
510 	struct device *dev = ice_pf_to_dev(pf);
511 	struct ice_hw *hw = &pf->hw;
512 
513 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
514 
515 	ice_prepare_for_reset(pf);
516 
517 	/* trigger the reset */
518 	if (ice_reset(hw, reset_type)) {
519 		dev_err(dev, "reset %d failed\n", reset_type);
520 		set_bit(ICE_RESET_FAILED, pf->state);
521 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
522 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
523 		clear_bit(ICE_PFR_REQ, pf->state);
524 		clear_bit(ICE_CORER_REQ, pf->state);
525 		clear_bit(ICE_GLOBR_REQ, pf->state);
526 		wake_up(&pf->reset_wait_queue);
527 		return;
528 	}
529 
530 	/* PFR is a bit of a special case because it doesn't result in an OICR
531 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
532 	 * associated state bits.
533 	 */
534 	if (reset_type == ICE_RESET_PFR) {
535 		pf->pfr_count++;
536 		ice_rebuild(pf, reset_type);
537 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
538 		clear_bit(ICE_PFR_REQ, pf->state);
539 		wake_up(&pf->reset_wait_queue);
540 		ice_reset_all_vfs(pf, true);
541 	}
542 }
543 
544 /**
545  * ice_reset_subtask - Set up for resetting the device and driver
546  * @pf: board private structure
547  */
548 static void ice_reset_subtask(struct ice_pf *pf)
549 {
550 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
551 
552 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
553 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
554 	 * of reset is pending and sets bits in pf->state indicating the reset
555 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
556 	 * prepare for pending reset if not already (for PF software-initiated
557 	 * global resets the software should already be prepared for it as
558 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
559 	 * by firmware or software on other PFs, that bit is not set so prepare
560 	 * for the reset now), poll for reset done, rebuild and return.
561 	 */
562 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
563 		/* Perform the largest reset requested */
564 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
565 			reset_type = ICE_RESET_CORER;
566 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
567 			reset_type = ICE_RESET_GLOBR;
568 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
569 			reset_type = ICE_RESET_EMPR;
570 		/* return if no valid reset type requested */
571 		if (reset_type == ICE_RESET_INVAL)
572 			return;
573 		ice_prepare_for_reset(pf);
574 
575 		/* make sure we are ready to rebuild */
576 		if (ice_check_reset(&pf->hw)) {
577 			set_bit(ICE_RESET_FAILED, pf->state);
578 		} else {
579 			/* done with reset. start rebuild */
580 			pf->hw.reset_ongoing = false;
581 			ice_rebuild(pf, reset_type);
582 			/* clear bit to resume normal operations, but
583 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
584 			 */
585 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
586 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
587 			clear_bit(ICE_PFR_REQ, pf->state);
588 			clear_bit(ICE_CORER_REQ, pf->state);
589 			clear_bit(ICE_GLOBR_REQ, pf->state);
590 			wake_up(&pf->reset_wait_queue);
591 			ice_reset_all_vfs(pf, true);
592 		}
593 
594 		return;
595 	}
596 
597 	/* No pending resets to finish processing. Check for new resets */
598 	if (test_bit(ICE_PFR_REQ, pf->state))
599 		reset_type = ICE_RESET_PFR;
600 	if (test_bit(ICE_CORER_REQ, pf->state))
601 		reset_type = ICE_RESET_CORER;
602 	if (test_bit(ICE_GLOBR_REQ, pf->state))
603 		reset_type = ICE_RESET_GLOBR;
604 	/* If no valid reset type requested just return */
605 	if (reset_type == ICE_RESET_INVAL)
606 		return;
607 
608 	/* reset if not already down or busy */
609 	if (!test_bit(ICE_DOWN, pf->state) &&
610 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
611 		ice_do_reset(pf, reset_type);
612 	}
613 }
614 
615 /**
616  * ice_print_topo_conflict - print topology conflict message
617  * @vsi: the VSI whose topology status is being checked
618  */
619 static void ice_print_topo_conflict(struct ice_vsi *vsi)
620 {
621 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
622 	case ICE_AQ_LINK_TOPO_CONFLICT:
623 	case ICE_AQ_LINK_MEDIA_CONFLICT:
624 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
625 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
626 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
627 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
628 		break;
629 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
630 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
631 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
632 		else
633 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
634 		break;
635 	default:
636 		break;
637 	}
638 }
639 
640 /**
641  * ice_print_link_msg - print link up or down message
642  * @vsi: the VSI whose link status is being queried
643  * @isup: boolean for if the link is now up or down
644  */
645 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
646 {
647 	struct ice_aqc_get_phy_caps_data *caps;
648 	const char *an_advertised;
649 	enum ice_status status;
650 	const char *fec_req;
651 	const char *speed;
652 	const char *fec;
653 	const char *fc;
654 	const char *an;
655 
656 	if (!vsi)
657 		return;
658 
659 	if (vsi->current_isup == isup)
660 		return;
661 
662 	vsi->current_isup = isup;
663 
664 	if (!isup) {
665 		netdev_info(vsi->netdev, "NIC Link is Down\n");
666 		return;
667 	}
668 
669 	switch (vsi->port_info->phy.link_info.link_speed) {
670 	case ICE_AQ_LINK_SPEED_100GB:
671 		speed = "100 G";
672 		break;
673 	case ICE_AQ_LINK_SPEED_50GB:
674 		speed = "50 G";
675 		break;
676 	case ICE_AQ_LINK_SPEED_40GB:
677 		speed = "40 G";
678 		break;
679 	case ICE_AQ_LINK_SPEED_25GB:
680 		speed = "25 G";
681 		break;
682 	case ICE_AQ_LINK_SPEED_20GB:
683 		speed = "20 G";
684 		break;
685 	case ICE_AQ_LINK_SPEED_10GB:
686 		speed = "10 G";
687 		break;
688 	case ICE_AQ_LINK_SPEED_5GB:
689 		speed = "5 G";
690 		break;
691 	case ICE_AQ_LINK_SPEED_2500MB:
692 		speed = "2.5 G";
693 		break;
694 	case ICE_AQ_LINK_SPEED_1000MB:
695 		speed = "1 G";
696 		break;
697 	case ICE_AQ_LINK_SPEED_100MB:
698 		speed = "100 M";
699 		break;
700 	default:
701 		speed = "Unknown ";
702 		break;
703 	}
704 
705 	switch (vsi->port_info->fc.current_mode) {
706 	case ICE_FC_FULL:
707 		fc = "Rx/Tx";
708 		break;
709 	case ICE_FC_TX_PAUSE:
710 		fc = "Tx";
711 		break;
712 	case ICE_FC_RX_PAUSE:
713 		fc = "Rx";
714 		break;
715 	case ICE_FC_NONE:
716 		fc = "None";
717 		break;
718 	default:
719 		fc = "Unknown";
720 		break;
721 	}
722 
723 	/* Get FEC mode based on negotiated link info */
724 	switch (vsi->port_info->phy.link_info.fec_info) {
725 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
726 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
727 		fec = "RS-FEC";
728 		break;
729 	case ICE_AQ_LINK_25G_KR_FEC_EN:
730 		fec = "FC-FEC/BASE-R";
731 		break;
732 	default:
733 		fec = "NONE";
734 		break;
735 	}
736 
737 	/* check if autoneg completed, might be false due to not supported */
738 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
739 		an = "True";
740 	else
741 		an = "False";
742 
743 	/* Get FEC mode requested based on PHY caps last SW configuration */
744 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
745 	if (!caps) {
746 		fec_req = "Unknown";
747 		an_advertised = "Unknown";
748 		goto done;
749 	}
750 
751 	status = ice_aq_get_phy_caps(vsi->port_info, false,
752 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
753 	if (status)
754 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
755 
756 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
757 
758 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
759 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
760 		fec_req = "RS-FEC";
761 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
762 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
763 		fec_req = "FC-FEC/BASE-R";
764 	else
765 		fec_req = "NONE";
766 
767 	kfree(caps);
768 
769 done:
770 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
771 		    speed, fec_req, fec, an_advertised, an, fc);
772 	ice_print_topo_conflict(vsi);
773 }
774 
775 /**
776  * ice_vsi_link_event - update the VSI's netdev
777  * @vsi: the VSI on which the link event occurred
778  * @link_up: whether or not the VSI needs to be set up or down
779  */
780 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
781 {
782 	if (!vsi)
783 		return;
784 
785 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
786 		return;
787 
788 	if (vsi->type == ICE_VSI_PF) {
789 		if (link_up == netif_carrier_ok(vsi->netdev))
790 			return;
791 
792 		if (link_up) {
793 			netif_carrier_on(vsi->netdev);
794 			netif_tx_wake_all_queues(vsi->netdev);
795 		} else {
796 			netif_carrier_off(vsi->netdev);
797 			netif_tx_stop_all_queues(vsi->netdev);
798 		}
799 	}
800 }
801 
802 /**
803  * ice_set_dflt_mib - send a default config MIB to the FW
804  * @pf: private PF struct
805  *
806  * This function sends a default configuration MIB to the FW.
807  *
808  * If this function errors out at any point, the driver is still able to
809  * function.  The main impact is that LFC may not operate as expected.
810  * Therefore an error state in this function should be treated with a DBG
811  * message and continue on with driver rebuild/reenable.
812  */
813 static void ice_set_dflt_mib(struct ice_pf *pf)
814 {
815 	struct device *dev = ice_pf_to_dev(pf);
816 	u8 mib_type, *buf, *lldpmib = NULL;
817 	u16 len, typelen, offset = 0;
818 	struct ice_lldp_org_tlv *tlv;
819 	struct ice_hw *hw = &pf->hw;
820 	u32 ouisubtype;
821 
822 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
823 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
824 	if (!lldpmib) {
825 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
826 			__func__);
827 		return;
828 	}
829 
830 	/* Add ETS CFG TLV */
831 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
832 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
833 		   ICE_IEEE_ETS_TLV_LEN);
834 	tlv->typelen = htons(typelen);
835 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
836 		      ICE_IEEE_SUBTYPE_ETS_CFG);
837 	tlv->ouisubtype = htonl(ouisubtype);
838 
839 	buf = tlv->tlvinfo;
840 	buf[0] = 0;
841 
842 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
843 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
844 	 * Octets 13 - 20 are TSA values - leave as zeros
845 	 */
846 	buf[5] = 0x64;
847 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
848 	offset += len + 2;
849 	tlv = (struct ice_lldp_org_tlv *)
850 		((char *)tlv + sizeof(tlv->typelen) + len);
851 
852 	/* Add ETS REC TLV */
853 	buf = tlv->tlvinfo;
854 	tlv->typelen = htons(typelen);
855 
856 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
857 		      ICE_IEEE_SUBTYPE_ETS_REC);
858 	tlv->ouisubtype = htonl(ouisubtype);
859 
860 	/* First octet of buf is reserved
861 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
862 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
863 	 * Octets 13 - 20 are TSA value - leave as zeros
864 	 */
865 	buf[5] = 0x64;
866 	offset += len + 2;
867 	tlv = (struct ice_lldp_org_tlv *)
868 		((char *)tlv + sizeof(tlv->typelen) + len);
869 
870 	/* Add PFC CFG TLV */
871 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
872 		   ICE_IEEE_PFC_TLV_LEN);
873 	tlv->typelen = htons(typelen);
874 
875 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
876 		      ICE_IEEE_SUBTYPE_PFC_CFG);
877 	tlv->ouisubtype = htonl(ouisubtype);
878 
879 	/* Octet 1 left as all zeros - PFC disabled */
880 	buf[0] = 0x08;
881 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
882 	offset += len + 2;
883 
884 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
885 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
886 
887 	kfree(lldpmib);
888 }
889 
890 /**
891  * ice_check_module_power
892  * @pf: pointer to PF struct
893  * @link_cfg_err: bitmap from the link info structure
894  *
895  * check module power level returned by a previous call to aq_get_link_info
896  * and print error messages if module power level is not supported
897  */
898 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
899 {
900 	/* if module power level is supported, clear the flag */
901 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
902 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
903 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
904 		return;
905 	}
906 
907 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
908 	 * above block didn't clear this bit, there's nothing to do
909 	 */
910 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
911 		return;
912 
913 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
914 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
915 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
916 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
917 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
918 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
919 	}
920 }
921 
922 /**
923  * ice_link_event - process the link event
924  * @pf: PF that the link event is associated with
925  * @pi: port_info for the port that the link event is associated with
926  * @link_up: true if the physical link is up and false if it is down
927  * @link_speed: current link speed received from the link event
928  *
929  * Returns 0 on success and negative on failure
930  */
931 static int
932 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
933 	       u16 link_speed)
934 {
935 	struct device *dev = ice_pf_to_dev(pf);
936 	struct ice_phy_info *phy_info;
937 	enum ice_status status;
938 	struct ice_vsi *vsi;
939 	u16 old_link_speed;
940 	bool old_link;
941 
942 	phy_info = &pi->phy;
943 	phy_info->link_info_old = phy_info->link_info;
944 
945 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
946 	old_link_speed = phy_info->link_info_old.link_speed;
947 
948 	/* update the link info structures and re-enable link events,
949 	 * don't bail on failure due to other book keeping needed
950 	 */
951 	status = ice_update_link_info(pi);
952 	if (status)
953 		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
954 			pi->lport, ice_stat_str(status),
955 			ice_aq_str(pi->hw->adminq.sq_last_status));
956 
957 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
958 
959 	/* Check if the link state is up after updating link info, and treat
960 	 * this event as an UP event since the link is actually UP now.
961 	 */
962 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
963 		link_up = true;
964 
965 	vsi = ice_get_main_vsi(pf);
966 	if (!vsi || !vsi->port_info)
967 		return -EINVAL;
968 
969 	/* turn off PHY if media was removed */
970 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
971 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
972 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
973 		ice_set_link(vsi, false);
974 	}
975 
976 	/* if the old link up/down and speed is the same as the new */
977 	if (link_up == old_link && link_speed == old_link_speed)
978 		return 0;
979 
980 	if (ice_is_dcb_active(pf)) {
981 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
982 			ice_dcb_rebuild(pf);
983 	} else {
984 		if (link_up)
985 			ice_set_dflt_mib(pf);
986 	}
987 	ice_vsi_link_event(vsi, link_up);
988 	ice_print_link_msg(vsi, link_up);
989 
990 	ice_vc_notify_link_state(pf);
991 
992 	return 0;
993 }
994 
995 /**
996  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
997  * @pf: board private structure
998  */
999 static void ice_watchdog_subtask(struct ice_pf *pf)
1000 {
1001 	int i;
1002 
1003 	/* if interface is down do nothing */
1004 	if (test_bit(ICE_DOWN, pf->state) ||
1005 	    test_bit(ICE_CFG_BUSY, pf->state))
1006 		return;
1007 
1008 	/* make sure we don't do these things too often */
1009 	if (time_before(jiffies,
1010 			pf->serv_tmr_prev + pf->serv_tmr_period))
1011 		return;
1012 
1013 	pf->serv_tmr_prev = jiffies;
1014 
1015 	/* Update the stats for active netdevs so the network stack
1016 	 * can look at updated numbers whenever it cares to
1017 	 */
1018 	ice_update_pf_stats(pf);
1019 	ice_for_each_vsi(pf, i)
1020 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1021 			ice_update_vsi_stats(pf->vsi[i]);
1022 }
1023 
1024 /**
1025  * ice_init_link_events - enable/initialize link events
1026  * @pi: pointer to the port_info instance
1027  *
1028  * Returns -EIO on failure, 0 on success
1029  */
1030 static int ice_init_link_events(struct ice_port_info *pi)
1031 {
1032 	u16 mask;
1033 
1034 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1035 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
1036 
1037 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1038 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1039 			pi->lport);
1040 		return -EIO;
1041 	}
1042 
1043 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1044 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1045 			pi->lport);
1046 		return -EIO;
1047 	}
1048 
1049 	return 0;
1050 }
1051 
1052 /**
1053  * ice_handle_link_event - handle link event via ARQ
1054  * @pf: PF that the link event is associated with
1055  * @event: event structure containing link status info
1056  */
1057 static int
1058 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1059 {
1060 	struct ice_aqc_get_link_status_data *link_data;
1061 	struct ice_port_info *port_info;
1062 	int status;
1063 
1064 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1065 	port_info = pf->hw.port_info;
1066 	if (!port_info)
1067 		return -EINVAL;
1068 
1069 	status = ice_link_event(pf, port_info,
1070 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1071 				le16_to_cpu(link_data->link_speed));
1072 	if (status)
1073 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1074 			status);
1075 
1076 	return status;
1077 }
1078 
1079 enum ice_aq_task_state {
1080 	ICE_AQ_TASK_WAITING = 0,
1081 	ICE_AQ_TASK_COMPLETE,
1082 	ICE_AQ_TASK_CANCELED,
1083 };
1084 
1085 struct ice_aq_task {
1086 	struct hlist_node entry;
1087 
1088 	u16 opcode;
1089 	struct ice_rq_event_info *event;
1090 	enum ice_aq_task_state state;
1091 };
1092 
1093 /**
1094  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1095  * @pf: pointer to the PF private structure
1096  * @opcode: the opcode to wait for
1097  * @timeout: how long to wait, in jiffies
1098  * @event: storage for the event info
1099  *
1100  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1101  * current thread will be put to sleep until the specified event occurs or
1102  * until the given timeout is reached.
1103  *
1104  * To obtain only the descriptor contents, pass an event without an allocated
1105  * msg_buf. If the complete data buffer is desired, allocate the
1106  * event->msg_buf with enough space ahead of time.
1107  *
1108  * Returns: zero on success, or a negative error code on failure.
1109  */
1110 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1111 			  struct ice_rq_event_info *event)
1112 {
1113 	struct device *dev = ice_pf_to_dev(pf);
1114 	struct ice_aq_task *task;
1115 	unsigned long start;
1116 	long ret;
1117 	int err;
1118 
1119 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1120 	if (!task)
1121 		return -ENOMEM;
1122 
1123 	INIT_HLIST_NODE(&task->entry);
1124 	task->opcode = opcode;
1125 	task->event = event;
1126 	task->state = ICE_AQ_TASK_WAITING;
1127 
1128 	spin_lock_bh(&pf->aq_wait_lock);
1129 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1130 	spin_unlock_bh(&pf->aq_wait_lock);
1131 
1132 	start = jiffies;
1133 
1134 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1135 					       timeout);
1136 	switch (task->state) {
1137 	case ICE_AQ_TASK_WAITING:
1138 		err = ret < 0 ? ret : -ETIMEDOUT;
1139 		break;
1140 	case ICE_AQ_TASK_CANCELED:
1141 		err = ret < 0 ? ret : -ECANCELED;
1142 		break;
1143 	case ICE_AQ_TASK_COMPLETE:
1144 		err = ret < 0 ? ret : 0;
1145 		break;
1146 	default:
1147 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1148 		err = -EINVAL;
1149 		break;
1150 	}
1151 
1152 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1153 		jiffies_to_msecs(jiffies - start),
1154 		jiffies_to_msecs(timeout),
1155 		opcode);
1156 
1157 	spin_lock_bh(&pf->aq_wait_lock);
1158 	hlist_del(&task->entry);
1159 	spin_unlock_bh(&pf->aq_wait_lock);
1160 	kfree(task);
1161 
1162 	return err;
1163 }
1164 
1165 /**
1166  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1167  * @pf: pointer to the PF private structure
1168  * @opcode: the opcode of the event
1169  * @event: the event to check
1170  *
1171  * Loops over the current list of pending threads waiting for an AdminQ event.
1172  * For each matching task, copy the contents of the event into the task
1173  * structure and wake up the thread.
1174  *
1175  * If multiple threads wait for the same opcode, they will all be woken up.
1176  *
1177  * Note that event->msg_buf will only be duplicated if the event has a buffer
1178  * with enough space already allocated. Otherwise, only the descriptor and
1179  * message length will be copied.
1180  *
1181  * Returns: true if an event was found, false otherwise
1182  */
1183 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1184 				struct ice_rq_event_info *event)
1185 {
1186 	struct ice_aq_task *task;
1187 	bool found = false;
1188 
1189 	spin_lock_bh(&pf->aq_wait_lock);
1190 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1191 		if (task->state || task->opcode != opcode)
1192 			continue;
1193 
1194 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1195 		task->event->msg_len = event->msg_len;
1196 
1197 		/* Only copy the data buffer if a destination was set */
1198 		if (task->event->msg_buf &&
1199 		    task->event->buf_len > event->buf_len) {
1200 			memcpy(task->event->msg_buf, event->msg_buf,
1201 			       event->buf_len);
1202 			task->event->buf_len = event->buf_len;
1203 		}
1204 
1205 		task->state = ICE_AQ_TASK_COMPLETE;
1206 		found = true;
1207 	}
1208 	spin_unlock_bh(&pf->aq_wait_lock);
1209 
1210 	if (found)
1211 		wake_up(&pf->aq_wait_queue);
1212 }
1213 
1214 /**
1215  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1216  * @pf: the PF private structure
1217  *
1218  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1219  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1220  */
1221 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1222 {
1223 	struct ice_aq_task *task;
1224 
1225 	spin_lock_bh(&pf->aq_wait_lock);
1226 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1227 		task->state = ICE_AQ_TASK_CANCELED;
1228 	spin_unlock_bh(&pf->aq_wait_lock);
1229 
1230 	wake_up(&pf->aq_wait_queue);
1231 }
1232 
1233 /**
1234  * __ice_clean_ctrlq - helper function to clean controlq rings
1235  * @pf: ptr to struct ice_pf
1236  * @q_type: specific Control queue type
1237  */
1238 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1239 {
1240 	struct device *dev = ice_pf_to_dev(pf);
1241 	struct ice_rq_event_info event;
1242 	struct ice_hw *hw = &pf->hw;
1243 	struct ice_ctl_q_info *cq;
1244 	u16 pending, i = 0;
1245 	const char *qtype;
1246 	u32 oldval, val;
1247 
1248 	/* Do not clean control queue if/when PF reset fails */
1249 	if (test_bit(ICE_RESET_FAILED, pf->state))
1250 		return 0;
1251 
1252 	switch (q_type) {
1253 	case ICE_CTL_Q_ADMIN:
1254 		cq = &hw->adminq;
1255 		qtype = "Admin";
1256 		break;
1257 	case ICE_CTL_Q_SB:
1258 		cq = &hw->sbq;
1259 		qtype = "Sideband";
1260 		break;
1261 	case ICE_CTL_Q_MAILBOX:
1262 		cq = &hw->mailboxq;
1263 		qtype = "Mailbox";
1264 		/* we are going to try to detect a malicious VF, so set the
1265 		 * state to begin detection
1266 		 */
1267 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1268 		break;
1269 	default:
1270 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1271 		return 0;
1272 	}
1273 
1274 	/* check for error indications - PF_xx_AxQLEN register layout for
1275 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1276 	 */
1277 	val = rd32(hw, cq->rq.len);
1278 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1279 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1280 		oldval = val;
1281 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1282 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1283 				qtype);
1284 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1285 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1286 				qtype);
1287 		}
1288 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1289 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1290 				qtype);
1291 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1292 			 PF_FW_ARQLEN_ARQCRIT_M);
1293 		if (oldval != val)
1294 			wr32(hw, cq->rq.len, val);
1295 	}
1296 
1297 	val = rd32(hw, cq->sq.len);
1298 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1299 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1300 		oldval = val;
1301 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1302 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1303 				qtype);
1304 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1305 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1306 				qtype);
1307 		}
1308 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1309 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1310 				qtype);
1311 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1312 			 PF_FW_ATQLEN_ATQCRIT_M);
1313 		if (oldval != val)
1314 			wr32(hw, cq->sq.len, val);
1315 	}
1316 
1317 	event.buf_len = cq->rq_buf_size;
1318 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1319 	if (!event.msg_buf)
1320 		return 0;
1321 
1322 	do {
1323 		enum ice_status ret;
1324 		u16 opcode;
1325 
1326 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1327 		if (ret == ICE_ERR_AQ_NO_WORK)
1328 			break;
1329 		if (ret) {
1330 			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1331 				ice_stat_str(ret));
1332 			break;
1333 		}
1334 
1335 		opcode = le16_to_cpu(event.desc.opcode);
1336 
1337 		/* Notify any thread that might be waiting for this event */
1338 		ice_aq_check_events(pf, opcode, &event);
1339 
1340 		switch (opcode) {
1341 		case ice_aqc_opc_get_link_status:
1342 			if (ice_handle_link_event(pf, &event))
1343 				dev_err(dev, "Could not handle link event\n");
1344 			break;
1345 		case ice_aqc_opc_event_lan_overflow:
1346 			ice_vf_lan_overflow_event(pf, &event);
1347 			break;
1348 		case ice_mbx_opc_send_msg_to_pf:
1349 			if (!ice_is_malicious_vf(pf, &event, i, pending))
1350 				ice_vc_process_vf_msg(pf, &event);
1351 			break;
1352 		case ice_aqc_opc_fw_logging:
1353 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1354 			break;
1355 		case ice_aqc_opc_lldp_set_mib_change:
1356 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1357 			break;
1358 		default:
1359 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1360 				qtype, opcode);
1361 			break;
1362 		}
1363 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1364 
1365 	kfree(event.msg_buf);
1366 
1367 	return pending && (i == ICE_DFLT_IRQ_WORK);
1368 }
1369 
1370 /**
1371  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1372  * @hw: pointer to hardware info
1373  * @cq: control queue information
1374  *
1375  * returns true if there are pending messages in a queue, false if there aren't
1376  */
1377 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1378 {
1379 	u16 ntu;
1380 
1381 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1382 	return cq->rq.next_to_clean != ntu;
1383 }
1384 
1385 /**
1386  * ice_clean_adminq_subtask - clean the AdminQ rings
1387  * @pf: board private structure
1388  */
1389 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1390 {
1391 	struct ice_hw *hw = &pf->hw;
1392 
1393 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1394 		return;
1395 
1396 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1397 		return;
1398 
1399 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1400 
1401 	/* There might be a situation where new messages arrive to a control
1402 	 * queue between processing the last message and clearing the
1403 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1404 	 * ice_ctrlq_pending) and process new messages if any.
1405 	 */
1406 	if (ice_ctrlq_pending(hw, &hw->adminq))
1407 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1408 
1409 	ice_flush(hw);
1410 }
1411 
1412 /**
1413  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1414  * @pf: board private structure
1415  */
1416 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1417 {
1418 	struct ice_hw *hw = &pf->hw;
1419 
1420 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1421 		return;
1422 
1423 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1424 		return;
1425 
1426 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1427 
1428 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1429 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1430 
1431 	ice_flush(hw);
1432 }
1433 
1434 /**
1435  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1436  * @pf: board private structure
1437  */
1438 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1439 {
1440 	struct ice_hw *hw = &pf->hw;
1441 
1442 	/* Nothing to do here if sideband queue is not supported */
1443 	if (!ice_is_sbq_supported(hw)) {
1444 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1445 		return;
1446 	}
1447 
1448 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1449 		return;
1450 
1451 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1452 		return;
1453 
1454 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1455 
1456 	if (ice_ctrlq_pending(hw, &hw->sbq))
1457 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1458 
1459 	ice_flush(hw);
1460 }
1461 
1462 /**
1463  * ice_service_task_schedule - schedule the service task to wake up
1464  * @pf: board private structure
1465  *
1466  * If not already scheduled, this puts the task into the work queue.
1467  */
1468 void ice_service_task_schedule(struct ice_pf *pf)
1469 {
1470 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1471 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1472 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1473 		queue_work(ice_wq, &pf->serv_task);
1474 }
1475 
1476 /**
1477  * ice_service_task_complete - finish up the service task
1478  * @pf: board private structure
1479  */
1480 static void ice_service_task_complete(struct ice_pf *pf)
1481 {
1482 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1483 
1484 	/* force memory (pf->state) to sync before next service task */
1485 	smp_mb__before_atomic();
1486 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1487 }
1488 
1489 /**
1490  * ice_service_task_stop - stop service task and cancel works
1491  * @pf: board private structure
1492  *
1493  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1494  * 1 otherwise.
1495  */
1496 static int ice_service_task_stop(struct ice_pf *pf)
1497 {
1498 	int ret;
1499 
1500 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1501 
1502 	if (pf->serv_tmr.function)
1503 		del_timer_sync(&pf->serv_tmr);
1504 	if (pf->serv_task.func)
1505 		cancel_work_sync(&pf->serv_task);
1506 
1507 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1508 	return ret;
1509 }
1510 
1511 /**
1512  * ice_service_task_restart - restart service task and schedule works
1513  * @pf: board private structure
1514  *
1515  * This function is needed for suspend and resume works (e.g WoL scenario)
1516  */
1517 static void ice_service_task_restart(struct ice_pf *pf)
1518 {
1519 	clear_bit(ICE_SERVICE_DIS, pf->state);
1520 	ice_service_task_schedule(pf);
1521 }
1522 
1523 /**
1524  * ice_service_timer - timer callback to schedule service task
1525  * @t: pointer to timer_list
1526  */
1527 static void ice_service_timer(struct timer_list *t)
1528 {
1529 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1530 
1531 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1532 	ice_service_task_schedule(pf);
1533 }
1534 
1535 /**
1536  * ice_handle_mdd_event - handle malicious driver detect event
1537  * @pf: pointer to the PF structure
1538  *
1539  * Called from service task. OICR interrupt handler indicates MDD event.
1540  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1541  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1542  * disable the queue, the PF can be configured to reset the VF using ethtool
1543  * private flag mdd-auto-reset-vf.
1544  */
1545 static void ice_handle_mdd_event(struct ice_pf *pf)
1546 {
1547 	struct device *dev = ice_pf_to_dev(pf);
1548 	struct ice_hw *hw = &pf->hw;
1549 	unsigned int i;
1550 	u32 reg;
1551 
1552 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1553 		/* Since the VF MDD event logging is rate limited, check if
1554 		 * there are pending MDD events.
1555 		 */
1556 		ice_print_vfs_mdd_events(pf);
1557 		return;
1558 	}
1559 
1560 	/* find what triggered an MDD event */
1561 	reg = rd32(hw, GL_MDET_TX_PQM);
1562 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1563 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1564 				GL_MDET_TX_PQM_PF_NUM_S;
1565 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1566 				GL_MDET_TX_PQM_VF_NUM_S;
1567 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1568 				GL_MDET_TX_PQM_MAL_TYPE_S;
1569 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1570 				GL_MDET_TX_PQM_QNUM_S);
1571 
1572 		if (netif_msg_tx_err(pf))
1573 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1574 				 event, queue, pf_num, vf_num);
1575 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1576 	}
1577 
1578 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1579 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1580 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1581 				GL_MDET_TX_TCLAN_PF_NUM_S;
1582 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1583 				GL_MDET_TX_TCLAN_VF_NUM_S;
1584 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1585 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1586 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1587 				GL_MDET_TX_TCLAN_QNUM_S);
1588 
1589 		if (netif_msg_tx_err(pf))
1590 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1591 				 event, queue, pf_num, vf_num);
1592 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1593 	}
1594 
1595 	reg = rd32(hw, GL_MDET_RX);
1596 	if (reg & GL_MDET_RX_VALID_M) {
1597 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1598 				GL_MDET_RX_PF_NUM_S;
1599 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1600 				GL_MDET_RX_VF_NUM_S;
1601 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1602 				GL_MDET_RX_MAL_TYPE_S;
1603 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1604 				GL_MDET_RX_QNUM_S);
1605 
1606 		if (netif_msg_rx_err(pf))
1607 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1608 				 event, queue, pf_num, vf_num);
1609 		wr32(hw, GL_MDET_RX, 0xffffffff);
1610 	}
1611 
1612 	/* check to see if this PF caused an MDD event */
1613 	reg = rd32(hw, PF_MDET_TX_PQM);
1614 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1615 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1616 		if (netif_msg_tx_err(pf))
1617 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1618 	}
1619 
1620 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1621 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1622 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1623 		if (netif_msg_tx_err(pf))
1624 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1625 	}
1626 
1627 	reg = rd32(hw, PF_MDET_RX);
1628 	if (reg & PF_MDET_RX_VALID_M) {
1629 		wr32(hw, PF_MDET_RX, 0xFFFF);
1630 		if (netif_msg_rx_err(pf))
1631 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1632 	}
1633 
1634 	/* Check to see if one of the VFs caused an MDD event, and then
1635 	 * increment counters and set print pending
1636 	 */
1637 	ice_for_each_vf(pf, i) {
1638 		struct ice_vf *vf = &pf->vf[i];
1639 
1640 		reg = rd32(hw, VP_MDET_TX_PQM(i));
1641 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1642 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1643 			vf->mdd_tx_events.count++;
1644 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1645 			if (netif_msg_tx_err(pf))
1646 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1647 					 i);
1648 		}
1649 
1650 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1651 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1652 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1653 			vf->mdd_tx_events.count++;
1654 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1655 			if (netif_msg_tx_err(pf))
1656 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1657 					 i);
1658 		}
1659 
1660 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1661 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1662 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1663 			vf->mdd_tx_events.count++;
1664 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1665 			if (netif_msg_tx_err(pf))
1666 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1667 					 i);
1668 		}
1669 
1670 		reg = rd32(hw, VP_MDET_RX(i));
1671 		if (reg & VP_MDET_RX_VALID_M) {
1672 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1673 			vf->mdd_rx_events.count++;
1674 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1675 			if (netif_msg_rx_err(pf))
1676 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1677 					 i);
1678 
1679 			/* Since the queue is disabled on VF Rx MDD events, the
1680 			 * PF can be configured to reset the VF through ethtool
1681 			 * private flag mdd-auto-reset-vf.
1682 			 */
1683 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1684 				/* VF MDD event counters will be cleared by
1685 				 * reset, so print the event prior to reset.
1686 				 */
1687 				ice_print_vf_rx_mdd_event(vf);
1688 				ice_reset_vf(&pf->vf[i], false);
1689 			}
1690 		}
1691 	}
1692 
1693 	ice_print_vfs_mdd_events(pf);
1694 }
1695 
1696 /**
1697  * ice_force_phys_link_state - Force the physical link state
1698  * @vsi: VSI to force the physical link state to up/down
1699  * @link_up: true/false indicates to set the physical link to up/down
1700  *
1701  * Force the physical link state by getting the current PHY capabilities from
1702  * hardware and setting the PHY config based on the determined capabilities. If
1703  * link changes a link event will be triggered because both the Enable Automatic
1704  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1705  *
1706  * Returns 0 on success, negative on failure
1707  */
1708 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1709 {
1710 	struct ice_aqc_get_phy_caps_data *pcaps;
1711 	struct ice_aqc_set_phy_cfg_data *cfg;
1712 	struct ice_port_info *pi;
1713 	struct device *dev;
1714 	int retcode;
1715 
1716 	if (!vsi || !vsi->port_info || !vsi->back)
1717 		return -EINVAL;
1718 	if (vsi->type != ICE_VSI_PF)
1719 		return 0;
1720 
1721 	dev = ice_pf_to_dev(vsi->back);
1722 
1723 	pi = vsi->port_info;
1724 
1725 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1726 	if (!pcaps)
1727 		return -ENOMEM;
1728 
1729 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1730 				      NULL);
1731 	if (retcode) {
1732 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1733 			vsi->vsi_num, retcode);
1734 		retcode = -EIO;
1735 		goto out;
1736 	}
1737 
1738 	/* No change in link */
1739 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1740 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1741 		goto out;
1742 
1743 	/* Use the current user PHY configuration. The current user PHY
1744 	 * configuration is initialized during probe from PHY capabilities
1745 	 * software mode, and updated on set PHY configuration.
1746 	 */
1747 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1748 	if (!cfg) {
1749 		retcode = -ENOMEM;
1750 		goto out;
1751 	}
1752 
1753 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1754 	if (link_up)
1755 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1756 	else
1757 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1758 
1759 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1760 	if (retcode) {
1761 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1762 			vsi->vsi_num, retcode);
1763 		retcode = -EIO;
1764 	}
1765 
1766 	kfree(cfg);
1767 out:
1768 	kfree(pcaps);
1769 	return retcode;
1770 }
1771 
1772 /**
1773  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1774  * @pi: port info structure
1775  *
1776  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1777  */
1778 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1779 {
1780 	struct ice_aqc_get_phy_caps_data *pcaps;
1781 	struct ice_pf *pf = pi->hw->back;
1782 	enum ice_status status;
1783 	int err = 0;
1784 
1785 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1786 	if (!pcaps)
1787 		return -ENOMEM;
1788 
1789 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1790 				     NULL);
1791 
1792 	if (status) {
1793 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1794 		err = -EIO;
1795 		goto out;
1796 	}
1797 
1798 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1799 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1800 
1801 out:
1802 	kfree(pcaps);
1803 	return err;
1804 }
1805 
1806 /**
1807  * ice_init_link_dflt_override - Initialize link default override
1808  * @pi: port info structure
1809  *
1810  * Initialize link default override and PHY total port shutdown during probe
1811  */
1812 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1813 {
1814 	struct ice_link_default_override_tlv *ldo;
1815 	struct ice_pf *pf = pi->hw->back;
1816 
1817 	ldo = &pf->link_dflt_override;
1818 	if (ice_get_link_default_override(ldo, pi))
1819 		return;
1820 
1821 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1822 		return;
1823 
1824 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1825 	 * ethtool private flag) for ports with Port Disable bit set.
1826 	 */
1827 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1828 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1829 }
1830 
1831 /**
1832  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1833  * @pi: port info structure
1834  *
1835  * If default override is enabled, initialize the user PHY cfg speed and FEC
1836  * settings using the default override mask from the NVM.
1837  *
1838  * The PHY should only be configured with the default override settings the
1839  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1840  * is used to indicate that the user PHY cfg default override is initialized
1841  * and the PHY has not been configured with the default override settings. The
1842  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1843  * configured.
1844  *
1845  * This function should be called only if the FW doesn't support default
1846  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1847  */
1848 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1849 {
1850 	struct ice_link_default_override_tlv *ldo;
1851 	struct ice_aqc_set_phy_cfg_data *cfg;
1852 	struct ice_phy_info *phy = &pi->phy;
1853 	struct ice_pf *pf = pi->hw->back;
1854 
1855 	ldo = &pf->link_dflt_override;
1856 
1857 	/* If link default override is enabled, use to mask NVM PHY capabilities
1858 	 * for speed and FEC default configuration.
1859 	 */
1860 	cfg = &phy->curr_user_phy_cfg;
1861 
1862 	if (ldo->phy_type_low || ldo->phy_type_high) {
1863 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1864 				    cpu_to_le64(ldo->phy_type_low);
1865 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1866 				     cpu_to_le64(ldo->phy_type_high);
1867 	}
1868 	cfg->link_fec_opt = ldo->fec_options;
1869 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1870 
1871 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1872 }
1873 
1874 /**
1875  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1876  * @pi: port info structure
1877  *
1878  * Initialize the current user PHY configuration, speed, FEC, and FC requested
1879  * mode to default. The PHY defaults are from get PHY capabilities topology
1880  * with media so call when media is first available. An error is returned if
1881  * called when media is not available. The PHY initialization completed state is
1882  * set here.
1883  *
1884  * These configurations are used when setting PHY
1885  * configuration. The user PHY configuration is updated on set PHY
1886  * configuration. Returns 0 on success, negative on failure
1887  */
1888 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1889 {
1890 	struct ice_aqc_get_phy_caps_data *pcaps;
1891 	struct ice_phy_info *phy = &pi->phy;
1892 	struct ice_pf *pf = pi->hw->back;
1893 	enum ice_status status;
1894 	int err = 0;
1895 
1896 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1897 		return -EIO;
1898 
1899 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1900 	if (!pcaps)
1901 		return -ENOMEM;
1902 
1903 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1904 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1905 					     pcaps, NULL);
1906 	else
1907 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1908 					     pcaps, NULL);
1909 	if (status) {
1910 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1911 		err = -EIO;
1912 		goto err_out;
1913 	}
1914 
1915 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1916 
1917 	/* check if lenient mode is supported and enabled */
1918 	if (ice_fw_supports_link_override(pi->hw) &&
1919 	    !(pcaps->module_compliance_enforcement &
1920 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1921 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1922 
1923 		/* if the FW supports default PHY configuration mode, then the driver
1924 		 * does not have to apply link override settings. If not,
1925 		 * initialize user PHY configuration with link override values
1926 		 */
1927 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1928 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1929 			ice_init_phy_cfg_dflt_override(pi);
1930 			goto out;
1931 		}
1932 	}
1933 
1934 	/* if link default override is not enabled, set user flow control and
1935 	 * FEC settings based on what get_phy_caps returned
1936 	 */
1937 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1938 						      pcaps->link_fec_options);
1939 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1940 
1941 out:
1942 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1943 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1944 err_out:
1945 	kfree(pcaps);
1946 	return err;
1947 }
1948 
1949 /**
1950  * ice_configure_phy - configure PHY
1951  * @vsi: VSI of PHY
1952  *
1953  * Set the PHY configuration. If the current PHY configuration is the same as
1954  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1955  * configure the based get PHY capabilities for topology with media.
1956  */
1957 static int ice_configure_phy(struct ice_vsi *vsi)
1958 {
1959 	struct device *dev = ice_pf_to_dev(vsi->back);
1960 	struct ice_port_info *pi = vsi->port_info;
1961 	struct ice_aqc_get_phy_caps_data *pcaps;
1962 	struct ice_aqc_set_phy_cfg_data *cfg;
1963 	struct ice_phy_info *phy = &pi->phy;
1964 	struct ice_pf *pf = vsi->back;
1965 	enum ice_status status;
1966 	int err = 0;
1967 
1968 	/* Ensure we have media as we cannot configure a medialess port */
1969 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1970 		return -EPERM;
1971 
1972 	ice_print_topo_conflict(vsi);
1973 
1974 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
1975 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1976 		return -EPERM;
1977 
1978 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1979 		return ice_force_phys_link_state(vsi, true);
1980 
1981 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1982 	if (!pcaps)
1983 		return -ENOMEM;
1984 
1985 	/* Get current PHY config */
1986 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1987 				     NULL);
1988 	if (status) {
1989 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1990 			vsi->vsi_num, ice_stat_str(status));
1991 		err = -EIO;
1992 		goto done;
1993 	}
1994 
1995 	/* If PHY enable link is configured and configuration has not changed,
1996 	 * there's nothing to do
1997 	 */
1998 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1999 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2000 		goto done;
2001 
2002 	/* Use PHY topology as baseline for configuration */
2003 	memset(pcaps, 0, sizeof(*pcaps));
2004 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2005 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2006 					     pcaps, NULL);
2007 	else
2008 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2009 					     pcaps, NULL);
2010 	if (status) {
2011 		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
2012 			vsi->vsi_num, ice_stat_str(status));
2013 		err = -EIO;
2014 		goto done;
2015 	}
2016 
2017 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2018 	if (!cfg) {
2019 		err = -ENOMEM;
2020 		goto done;
2021 	}
2022 
2023 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2024 
2025 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2026 	 * ice_init_phy_user_cfg_ldo.
2027 	 */
2028 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2029 			       vsi->back->state)) {
2030 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2031 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2032 	} else {
2033 		u64 phy_low = 0, phy_high = 0;
2034 
2035 		ice_update_phy_type(&phy_low, &phy_high,
2036 				    pi->phy.curr_user_speed_req);
2037 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2038 		cfg->phy_type_high = pcaps->phy_type_high &
2039 				     cpu_to_le64(phy_high);
2040 	}
2041 
2042 	/* Can't provide what was requested; use PHY capabilities */
2043 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2044 		cfg->phy_type_low = pcaps->phy_type_low;
2045 		cfg->phy_type_high = pcaps->phy_type_high;
2046 	}
2047 
2048 	/* FEC */
2049 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2050 
2051 	/* Can't provide what was requested; use PHY capabilities */
2052 	if (cfg->link_fec_opt !=
2053 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2054 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2055 		cfg->link_fec_opt = pcaps->link_fec_options;
2056 	}
2057 
2058 	/* Flow Control - always supported; no need to check against
2059 	 * capabilities
2060 	 */
2061 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2062 
2063 	/* Enable link and link update */
2064 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2065 
2066 	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2067 	if (status) {
2068 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
2069 			vsi->vsi_num, ice_stat_str(status));
2070 		err = -EIO;
2071 	}
2072 
2073 	kfree(cfg);
2074 done:
2075 	kfree(pcaps);
2076 	return err;
2077 }
2078 
2079 /**
2080  * ice_check_media_subtask - Check for media
2081  * @pf: pointer to PF struct
2082  *
2083  * If media is available, then initialize PHY user configuration if it is not
2084  * been, and configure the PHY if the interface is up.
2085  */
2086 static void ice_check_media_subtask(struct ice_pf *pf)
2087 {
2088 	struct ice_port_info *pi;
2089 	struct ice_vsi *vsi;
2090 	int err;
2091 
2092 	/* No need to check for media if it's already present */
2093 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2094 		return;
2095 
2096 	vsi = ice_get_main_vsi(pf);
2097 	if (!vsi)
2098 		return;
2099 
2100 	/* Refresh link info and check if media is present */
2101 	pi = vsi->port_info;
2102 	err = ice_update_link_info(pi);
2103 	if (err)
2104 		return;
2105 
2106 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
2107 
2108 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2109 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2110 			ice_init_phy_user_cfg(pi);
2111 
2112 		/* PHY settings are reset on media insertion, reconfigure
2113 		 * PHY to preserve settings.
2114 		 */
2115 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2116 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2117 			return;
2118 
2119 		err = ice_configure_phy(vsi);
2120 		if (!err)
2121 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2122 
2123 		/* A Link Status Event will be generated; the event handler
2124 		 * will complete bringing the interface up
2125 		 */
2126 	}
2127 }
2128 
2129 /**
2130  * ice_service_task - manage and run subtasks
2131  * @work: pointer to work_struct contained by the PF struct
2132  */
2133 static void ice_service_task(struct work_struct *work)
2134 {
2135 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2136 	unsigned long start_time = jiffies;
2137 
2138 	/* subtasks */
2139 
2140 	/* process reset requests first */
2141 	ice_reset_subtask(pf);
2142 
2143 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2144 	if (ice_is_reset_in_progress(pf->state) ||
2145 	    test_bit(ICE_SUSPENDED, pf->state) ||
2146 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2147 		ice_service_task_complete(pf);
2148 		return;
2149 	}
2150 
2151 	ice_clean_adminq_subtask(pf);
2152 	ice_check_media_subtask(pf);
2153 	ice_check_for_hang_subtask(pf);
2154 	ice_sync_fltr_subtask(pf);
2155 	ice_handle_mdd_event(pf);
2156 	ice_watchdog_subtask(pf);
2157 
2158 	if (ice_is_safe_mode(pf)) {
2159 		ice_service_task_complete(pf);
2160 		return;
2161 	}
2162 
2163 	ice_process_vflr_event(pf);
2164 	ice_clean_mailboxq_subtask(pf);
2165 	ice_clean_sbq_subtask(pf);
2166 	ice_sync_arfs_fltrs(pf);
2167 	ice_flush_fdir_ctx(pf);
2168 
2169 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2170 	ice_service_task_complete(pf);
2171 
2172 	/* If the tasks have taken longer than one service timer period
2173 	 * or there is more work to be done, reset the service timer to
2174 	 * schedule the service task now.
2175 	 */
2176 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2177 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2178 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2179 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2180 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2181 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2182 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2183 		mod_timer(&pf->serv_tmr, jiffies);
2184 }
2185 
2186 /**
2187  * ice_set_ctrlq_len - helper function to set controlq length
2188  * @hw: pointer to the HW instance
2189  */
2190 static void ice_set_ctrlq_len(struct ice_hw *hw)
2191 {
2192 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2193 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2194 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2195 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2196 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2197 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2198 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2199 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2200 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2201 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2202 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2203 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2204 }
2205 
2206 /**
2207  * ice_schedule_reset - schedule a reset
2208  * @pf: board private structure
2209  * @reset: reset being requested
2210  */
2211 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2212 {
2213 	struct device *dev = ice_pf_to_dev(pf);
2214 
2215 	/* bail out if earlier reset has failed */
2216 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2217 		dev_dbg(dev, "earlier reset has failed\n");
2218 		return -EIO;
2219 	}
2220 	/* bail if reset/recovery already in progress */
2221 	if (ice_is_reset_in_progress(pf->state)) {
2222 		dev_dbg(dev, "Reset already in progress\n");
2223 		return -EBUSY;
2224 	}
2225 
2226 	ice_unplug_aux_dev(pf);
2227 
2228 	switch (reset) {
2229 	case ICE_RESET_PFR:
2230 		set_bit(ICE_PFR_REQ, pf->state);
2231 		break;
2232 	case ICE_RESET_CORER:
2233 		set_bit(ICE_CORER_REQ, pf->state);
2234 		break;
2235 	case ICE_RESET_GLOBR:
2236 		set_bit(ICE_GLOBR_REQ, pf->state);
2237 		break;
2238 	default:
2239 		return -EINVAL;
2240 	}
2241 
2242 	ice_service_task_schedule(pf);
2243 	return 0;
2244 }
2245 
2246 /**
2247  * ice_irq_affinity_notify - Callback for affinity changes
2248  * @notify: context as to what irq was changed
2249  * @mask: the new affinity mask
2250  *
2251  * This is a callback function used by the irq_set_affinity_notifier function
2252  * so that we may register to receive changes to the irq affinity masks.
2253  */
2254 static void
2255 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2256 			const cpumask_t *mask)
2257 {
2258 	struct ice_q_vector *q_vector =
2259 		container_of(notify, struct ice_q_vector, affinity_notify);
2260 
2261 	cpumask_copy(&q_vector->affinity_mask, mask);
2262 }
2263 
2264 /**
2265  * ice_irq_affinity_release - Callback for affinity notifier release
2266  * @ref: internal core kernel usage
2267  *
2268  * This is a callback function used by the irq_set_affinity_notifier function
2269  * to inform the current notification subscriber that they will no longer
2270  * receive notifications.
2271  */
2272 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2273 
2274 /**
2275  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2276  * @vsi: the VSI being configured
2277  */
2278 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2279 {
2280 	struct ice_hw *hw = &vsi->back->hw;
2281 	int i;
2282 
2283 	ice_for_each_q_vector(vsi, i)
2284 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2285 
2286 	ice_flush(hw);
2287 	return 0;
2288 }
2289 
2290 /**
2291  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2292  * @vsi: the VSI being configured
2293  * @basename: name for the vector
2294  */
2295 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2296 {
2297 	int q_vectors = vsi->num_q_vectors;
2298 	struct ice_pf *pf = vsi->back;
2299 	int base = vsi->base_vector;
2300 	struct device *dev;
2301 	int rx_int_idx = 0;
2302 	int tx_int_idx = 0;
2303 	int vector, err;
2304 	int irq_num;
2305 
2306 	dev = ice_pf_to_dev(pf);
2307 	for (vector = 0; vector < q_vectors; vector++) {
2308 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2309 
2310 		irq_num = pf->msix_entries[base + vector].vector;
2311 
2312 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2313 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2314 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2315 			tx_int_idx++;
2316 		} else if (q_vector->rx.rx_ring) {
2317 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2318 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2319 		} else if (q_vector->tx.tx_ring) {
2320 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2321 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2322 		} else {
2323 			/* skip this unused q_vector */
2324 			continue;
2325 		}
2326 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2327 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2328 					       IRQF_SHARED, q_vector->name,
2329 					       q_vector);
2330 		else
2331 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2332 					       0, q_vector->name, q_vector);
2333 		if (err) {
2334 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2335 				   err);
2336 			goto free_q_irqs;
2337 		}
2338 
2339 		/* register for affinity change notifications */
2340 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2341 			struct irq_affinity_notify *affinity_notify;
2342 
2343 			affinity_notify = &q_vector->affinity_notify;
2344 			affinity_notify->notify = ice_irq_affinity_notify;
2345 			affinity_notify->release = ice_irq_affinity_release;
2346 			irq_set_affinity_notifier(irq_num, affinity_notify);
2347 		}
2348 
2349 		/* assign the mask for this irq */
2350 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2351 	}
2352 
2353 	vsi->irqs_ready = true;
2354 	return 0;
2355 
2356 free_q_irqs:
2357 	while (vector) {
2358 		vector--;
2359 		irq_num = pf->msix_entries[base + vector].vector;
2360 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2361 			irq_set_affinity_notifier(irq_num, NULL);
2362 		irq_set_affinity_hint(irq_num, NULL);
2363 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2364 	}
2365 	return err;
2366 }
2367 
2368 /**
2369  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2370  * @vsi: VSI to setup Tx rings used by XDP
2371  *
2372  * Return 0 on success and negative value on error
2373  */
2374 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2375 {
2376 	struct device *dev = ice_pf_to_dev(vsi->back);
2377 	struct ice_tx_desc *tx_desc;
2378 	int i, j;
2379 
2380 	ice_for_each_xdp_txq(vsi, i) {
2381 		u16 xdp_q_idx = vsi->alloc_txq + i;
2382 		struct ice_tx_ring *xdp_ring;
2383 
2384 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2385 
2386 		if (!xdp_ring)
2387 			goto free_xdp_rings;
2388 
2389 		xdp_ring->q_index = xdp_q_idx;
2390 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2391 		xdp_ring->vsi = vsi;
2392 		xdp_ring->netdev = NULL;
2393 		xdp_ring->next_dd = ICE_TX_THRESH - 1;
2394 		xdp_ring->next_rs = ICE_TX_THRESH - 1;
2395 		xdp_ring->dev = dev;
2396 		xdp_ring->count = vsi->num_tx_desc;
2397 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2398 		if (ice_setup_tx_ring(xdp_ring))
2399 			goto free_xdp_rings;
2400 		ice_set_ring_xdp(xdp_ring);
2401 		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
2402 		spin_lock_init(&xdp_ring->tx_lock);
2403 		for (j = 0; j < xdp_ring->count; j++) {
2404 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2405 			tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
2406 		}
2407 	}
2408 
2409 	ice_for_each_rxq(vsi, i) {
2410 		if (static_key_enabled(&ice_xdp_locking_key))
2411 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2412 		else
2413 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
2414 	}
2415 
2416 	return 0;
2417 
2418 free_xdp_rings:
2419 	for (; i >= 0; i--)
2420 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2421 			ice_free_tx_ring(vsi->xdp_rings[i]);
2422 	return -ENOMEM;
2423 }
2424 
2425 /**
2426  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2427  * @vsi: VSI to set the bpf prog on
2428  * @prog: the bpf prog pointer
2429  */
2430 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2431 {
2432 	struct bpf_prog *old_prog;
2433 	int i;
2434 
2435 	old_prog = xchg(&vsi->xdp_prog, prog);
2436 	if (old_prog)
2437 		bpf_prog_put(old_prog);
2438 
2439 	ice_for_each_rxq(vsi, i)
2440 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2441 }
2442 
2443 /**
2444  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2445  * @vsi: VSI to bring up Tx rings used by XDP
2446  * @prog: bpf program that will be assigned to VSI
2447  *
2448  * Return 0 on success and negative value on error
2449  */
2450 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2451 {
2452 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2453 	int xdp_rings_rem = vsi->num_xdp_txq;
2454 	struct ice_pf *pf = vsi->back;
2455 	struct ice_qs_cfg xdp_qs_cfg = {
2456 		.qs_mutex = &pf->avail_q_mutex,
2457 		.pf_map = pf->avail_txqs,
2458 		.pf_map_size = pf->max_pf_txqs,
2459 		.q_count = vsi->num_xdp_txq,
2460 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2461 		.vsi_map = vsi->txq_map,
2462 		.vsi_map_offset = vsi->alloc_txq,
2463 		.mapping_mode = ICE_VSI_MAP_CONTIG
2464 	};
2465 	enum ice_status status;
2466 	struct device *dev;
2467 	int i, v_idx;
2468 
2469 	dev = ice_pf_to_dev(pf);
2470 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2471 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2472 	if (!vsi->xdp_rings)
2473 		return -ENOMEM;
2474 
2475 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2476 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2477 		goto err_map_xdp;
2478 
2479 	if (static_key_enabled(&ice_xdp_locking_key))
2480 		netdev_warn(vsi->netdev,
2481 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2482 
2483 	if (ice_xdp_alloc_setup_rings(vsi))
2484 		goto clear_xdp_rings;
2485 
2486 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2487 	ice_for_each_q_vector(vsi, v_idx) {
2488 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2489 		int xdp_rings_per_v, q_id, q_base;
2490 
2491 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2492 					       vsi->num_q_vectors - v_idx);
2493 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2494 
2495 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2496 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2497 
2498 			xdp_ring->q_vector = q_vector;
2499 			xdp_ring->next = q_vector->tx.tx_ring;
2500 			q_vector->tx.tx_ring = xdp_ring;
2501 		}
2502 		xdp_rings_rem -= xdp_rings_per_v;
2503 	}
2504 
2505 	/* omit the scheduler update if in reset path; XDP queues will be
2506 	 * taken into account at the end of ice_vsi_rebuild, where
2507 	 * ice_cfg_vsi_lan is being called
2508 	 */
2509 	if (ice_is_reset_in_progress(pf->state))
2510 		return 0;
2511 
2512 	/* tell the Tx scheduler that right now we have
2513 	 * additional queues
2514 	 */
2515 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2516 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2517 
2518 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2519 				 max_txqs);
2520 	if (status) {
2521 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2522 			ice_stat_str(status));
2523 		goto clear_xdp_rings;
2524 	}
2525 	ice_vsi_assign_bpf_prog(vsi, prog);
2526 
2527 	return 0;
2528 clear_xdp_rings:
2529 	ice_for_each_xdp_txq(vsi, i)
2530 		if (vsi->xdp_rings[i]) {
2531 			kfree_rcu(vsi->xdp_rings[i], rcu);
2532 			vsi->xdp_rings[i] = NULL;
2533 		}
2534 
2535 err_map_xdp:
2536 	mutex_lock(&pf->avail_q_mutex);
2537 	ice_for_each_xdp_txq(vsi, i) {
2538 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2539 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2540 	}
2541 	mutex_unlock(&pf->avail_q_mutex);
2542 
2543 	devm_kfree(dev, vsi->xdp_rings);
2544 	return -ENOMEM;
2545 }
2546 
2547 /**
2548  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2549  * @vsi: VSI to remove XDP rings
2550  *
2551  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2552  * resources
2553  */
2554 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2555 {
2556 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2557 	struct ice_pf *pf = vsi->back;
2558 	int i, v_idx;
2559 
2560 	/* q_vectors are freed in reset path so there's no point in detaching
2561 	 * rings; in case of rebuild being triggered not from reset bits
2562 	 * in pf->state won't be set, so additionally check first q_vector
2563 	 * against NULL
2564 	 */
2565 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2566 		goto free_qmap;
2567 
2568 	ice_for_each_q_vector(vsi, v_idx) {
2569 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2570 		struct ice_tx_ring *ring;
2571 
2572 		ice_for_each_tx_ring(ring, q_vector->tx)
2573 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2574 				break;
2575 
2576 		/* restore the value of last node prior to XDP setup */
2577 		q_vector->tx.tx_ring = ring;
2578 	}
2579 
2580 free_qmap:
2581 	mutex_lock(&pf->avail_q_mutex);
2582 	ice_for_each_xdp_txq(vsi, i) {
2583 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2584 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2585 	}
2586 	mutex_unlock(&pf->avail_q_mutex);
2587 
2588 	ice_for_each_xdp_txq(vsi, i)
2589 		if (vsi->xdp_rings[i]) {
2590 			if (vsi->xdp_rings[i]->desc)
2591 				ice_free_tx_ring(vsi->xdp_rings[i]);
2592 			kfree_rcu(vsi->xdp_rings[i], rcu);
2593 			vsi->xdp_rings[i] = NULL;
2594 		}
2595 
2596 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2597 	vsi->xdp_rings = NULL;
2598 
2599 	if (static_key_enabled(&ice_xdp_locking_key))
2600 		static_branch_dec(&ice_xdp_locking_key);
2601 
2602 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2603 		return 0;
2604 
2605 	ice_vsi_assign_bpf_prog(vsi, NULL);
2606 
2607 	/* notify Tx scheduler that we destroyed XDP queues and bring
2608 	 * back the old number of child nodes
2609 	 */
2610 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2611 		max_txqs[i] = vsi->num_txq;
2612 
2613 	/* change number of XDP Tx queues to 0 */
2614 	vsi->num_xdp_txq = 0;
2615 
2616 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2617 			       max_txqs);
2618 }
2619 
2620 /**
2621  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2622  * @vsi: VSI to schedule napi on
2623  */
2624 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2625 {
2626 	int i;
2627 
2628 	ice_for_each_rxq(vsi, i) {
2629 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2630 
2631 		if (rx_ring->xsk_pool)
2632 			napi_schedule(&rx_ring->q_vector->napi);
2633 	}
2634 }
2635 
2636 /**
2637  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2638  * @vsi: VSI to determine the count of XDP Tx qs
2639  *
2640  * returns 0 if Tx qs count is higher than at least half of CPU count,
2641  * -ENOMEM otherwise
2642  */
2643 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2644 {
2645 	u16 avail = ice_get_avail_txq_count(vsi->back);
2646 	u16 cpus = num_possible_cpus();
2647 
2648 	if (avail < cpus / 2)
2649 		return -ENOMEM;
2650 
2651 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2652 
2653 	if (vsi->num_xdp_txq < cpus)
2654 		static_branch_inc(&ice_xdp_locking_key);
2655 
2656 	return 0;
2657 }
2658 
2659 /**
2660  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2661  * @vsi: VSI to setup XDP for
2662  * @prog: XDP program
2663  * @extack: netlink extended ack
2664  */
2665 static int
2666 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2667 		   struct netlink_ext_ack *extack)
2668 {
2669 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2670 	bool if_running = netif_running(vsi->netdev);
2671 	int ret = 0, xdp_ring_err = 0;
2672 
2673 	if (frame_size > vsi->rx_buf_len) {
2674 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2675 		return -EOPNOTSUPP;
2676 	}
2677 
2678 	/* need to stop netdev while setting up the program for Rx rings */
2679 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2680 		ret = ice_down(vsi);
2681 		if (ret) {
2682 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2683 			return ret;
2684 		}
2685 	}
2686 
2687 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2688 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2689 		if (xdp_ring_err) {
2690 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2691 		} else {
2692 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2693 			if (xdp_ring_err)
2694 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2695 		}
2696 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2697 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2698 		if (xdp_ring_err)
2699 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2700 	} else {
2701 		ice_vsi_assign_bpf_prog(vsi, prog);
2702 	}
2703 
2704 	if (if_running)
2705 		ret = ice_up(vsi);
2706 
2707 	if (!ret && prog)
2708 		ice_vsi_rx_napi_schedule(vsi);
2709 
2710 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2711 }
2712 
2713 /**
2714  * ice_xdp_safe_mode - XDP handler for safe mode
2715  * @dev: netdevice
2716  * @xdp: XDP command
2717  */
2718 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2719 			     struct netdev_bpf *xdp)
2720 {
2721 	NL_SET_ERR_MSG_MOD(xdp->extack,
2722 			   "Please provide working DDP firmware package in order to use XDP\n"
2723 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2724 	return -EOPNOTSUPP;
2725 }
2726 
2727 /**
2728  * ice_xdp - implements XDP handler
2729  * @dev: netdevice
2730  * @xdp: XDP command
2731  */
2732 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2733 {
2734 	struct ice_netdev_priv *np = netdev_priv(dev);
2735 	struct ice_vsi *vsi = np->vsi;
2736 
2737 	if (vsi->type != ICE_VSI_PF) {
2738 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2739 		return -EINVAL;
2740 	}
2741 
2742 	switch (xdp->command) {
2743 	case XDP_SETUP_PROG:
2744 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2745 	case XDP_SETUP_XSK_POOL:
2746 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2747 					  xdp->xsk.queue_id);
2748 	default:
2749 		return -EINVAL;
2750 	}
2751 }
2752 
2753 /**
2754  * ice_ena_misc_vector - enable the non-queue interrupts
2755  * @pf: board private structure
2756  */
2757 static void ice_ena_misc_vector(struct ice_pf *pf)
2758 {
2759 	struct ice_hw *hw = &pf->hw;
2760 	u32 val;
2761 
2762 	/* Disable anti-spoof detection interrupt to prevent spurious event
2763 	 * interrupts during a function reset. Anti-spoof functionally is
2764 	 * still supported.
2765 	 */
2766 	val = rd32(hw, GL_MDCK_TX_TDPU);
2767 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2768 	wr32(hw, GL_MDCK_TX_TDPU, val);
2769 
2770 	/* clear things first */
2771 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2772 	rd32(hw, PFINT_OICR);		/* read to clear */
2773 
2774 	val = (PFINT_OICR_ECC_ERR_M |
2775 	       PFINT_OICR_MAL_DETECT_M |
2776 	       PFINT_OICR_GRST_M |
2777 	       PFINT_OICR_PCI_EXCEPTION_M |
2778 	       PFINT_OICR_VFLR_M |
2779 	       PFINT_OICR_HMC_ERR_M |
2780 	       PFINT_OICR_PE_PUSH_M |
2781 	       PFINT_OICR_PE_CRITERR_M);
2782 
2783 	wr32(hw, PFINT_OICR_ENA, val);
2784 
2785 	/* SW_ITR_IDX = 0, but don't change INTENA */
2786 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2787 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2788 }
2789 
2790 /**
2791  * ice_misc_intr - misc interrupt handler
2792  * @irq: interrupt number
2793  * @data: pointer to a q_vector
2794  */
2795 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2796 {
2797 	struct ice_pf *pf = (struct ice_pf *)data;
2798 	struct ice_hw *hw = &pf->hw;
2799 	irqreturn_t ret = IRQ_NONE;
2800 	struct device *dev;
2801 	u32 oicr, ena_mask;
2802 
2803 	dev = ice_pf_to_dev(pf);
2804 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2805 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2806 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2807 
2808 	oicr = rd32(hw, PFINT_OICR);
2809 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2810 
2811 	if (oicr & PFINT_OICR_SWINT_M) {
2812 		ena_mask &= ~PFINT_OICR_SWINT_M;
2813 		pf->sw_int_count++;
2814 	}
2815 
2816 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2817 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2818 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2819 	}
2820 	if (oicr & PFINT_OICR_VFLR_M) {
2821 		/* disable any further VFLR event notifications */
2822 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2823 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2824 
2825 			reg &= ~PFINT_OICR_VFLR_M;
2826 			wr32(hw, PFINT_OICR_ENA, reg);
2827 		} else {
2828 			ena_mask &= ~PFINT_OICR_VFLR_M;
2829 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2830 		}
2831 	}
2832 
2833 	if (oicr & PFINT_OICR_GRST_M) {
2834 		u32 reset;
2835 
2836 		/* we have a reset warning */
2837 		ena_mask &= ~PFINT_OICR_GRST_M;
2838 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2839 			GLGEN_RSTAT_RESET_TYPE_S;
2840 
2841 		if (reset == ICE_RESET_CORER)
2842 			pf->corer_count++;
2843 		else if (reset == ICE_RESET_GLOBR)
2844 			pf->globr_count++;
2845 		else if (reset == ICE_RESET_EMPR)
2846 			pf->empr_count++;
2847 		else
2848 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2849 
2850 		/* If a reset cycle isn't already in progress, we set a bit in
2851 		 * pf->state so that the service task can start a reset/rebuild.
2852 		 */
2853 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2854 			if (reset == ICE_RESET_CORER)
2855 				set_bit(ICE_CORER_RECV, pf->state);
2856 			else if (reset == ICE_RESET_GLOBR)
2857 				set_bit(ICE_GLOBR_RECV, pf->state);
2858 			else
2859 				set_bit(ICE_EMPR_RECV, pf->state);
2860 
2861 			/* There are couple of different bits at play here.
2862 			 * hw->reset_ongoing indicates whether the hardware is
2863 			 * in reset. This is set to true when a reset interrupt
2864 			 * is received and set back to false after the driver
2865 			 * has determined that the hardware is out of reset.
2866 			 *
2867 			 * ICE_RESET_OICR_RECV in pf->state indicates
2868 			 * that a post reset rebuild is required before the
2869 			 * driver is operational again. This is set above.
2870 			 *
2871 			 * As this is the start of the reset/rebuild cycle, set
2872 			 * both to indicate that.
2873 			 */
2874 			hw->reset_ongoing = true;
2875 		}
2876 	}
2877 
2878 	if (oicr & PFINT_OICR_TSYN_TX_M) {
2879 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
2880 		ice_ptp_process_ts(pf);
2881 	}
2882 
2883 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
2884 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2885 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
2886 
2887 		/* Save EVENTs from GTSYN register */
2888 		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
2889 						     GLTSYN_STAT_EVENT1_M |
2890 						     GLTSYN_STAT_EVENT2_M);
2891 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
2892 		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
2893 	}
2894 
2895 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
2896 	if (oicr & ICE_AUX_CRIT_ERR) {
2897 		struct iidc_event *event;
2898 
2899 		ena_mask &= ~ICE_AUX_CRIT_ERR;
2900 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2901 		if (event) {
2902 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2903 			/* report the entire OICR value to AUX driver */
2904 			event->reg = oicr;
2905 			ice_send_event_to_aux(pf, event);
2906 			kfree(event);
2907 		}
2908 	}
2909 
2910 	/* Report any remaining unexpected interrupts */
2911 	oicr &= ena_mask;
2912 	if (oicr) {
2913 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2914 		/* If a critical error is pending there is no choice but to
2915 		 * reset the device.
2916 		 */
2917 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2918 			    PFINT_OICR_ECC_ERR_M)) {
2919 			set_bit(ICE_PFR_REQ, pf->state);
2920 			ice_service_task_schedule(pf);
2921 		}
2922 	}
2923 	ret = IRQ_HANDLED;
2924 
2925 	ice_service_task_schedule(pf);
2926 	ice_irq_dynamic_ena(hw, NULL, NULL);
2927 
2928 	return ret;
2929 }
2930 
2931 /**
2932  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2933  * @hw: pointer to HW structure
2934  */
2935 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2936 {
2937 	/* disable Admin queue Interrupt causes */
2938 	wr32(hw, PFINT_FW_CTL,
2939 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2940 
2941 	/* disable Mailbox queue Interrupt causes */
2942 	wr32(hw, PFINT_MBX_CTL,
2943 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2944 
2945 	wr32(hw, PFINT_SB_CTL,
2946 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
2947 
2948 	/* disable Control queue Interrupt causes */
2949 	wr32(hw, PFINT_OICR_CTL,
2950 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2951 
2952 	ice_flush(hw);
2953 }
2954 
2955 /**
2956  * ice_free_irq_msix_misc - Unroll misc vector setup
2957  * @pf: board private structure
2958  */
2959 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2960 {
2961 	struct ice_hw *hw = &pf->hw;
2962 
2963 	ice_dis_ctrlq_interrupts(hw);
2964 
2965 	/* disable OICR interrupt */
2966 	wr32(hw, PFINT_OICR_ENA, 0);
2967 	ice_flush(hw);
2968 
2969 	if (pf->msix_entries) {
2970 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2971 		devm_free_irq(ice_pf_to_dev(pf),
2972 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2973 	}
2974 
2975 	pf->num_avail_sw_msix += 1;
2976 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2977 }
2978 
2979 /**
2980  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2981  * @hw: pointer to HW structure
2982  * @reg_idx: HW vector index to associate the control queue interrupts with
2983  */
2984 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2985 {
2986 	u32 val;
2987 
2988 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2989 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2990 	wr32(hw, PFINT_OICR_CTL, val);
2991 
2992 	/* enable Admin queue Interrupt causes */
2993 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2994 	       PFINT_FW_CTL_CAUSE_ENA_M);
2995 	wr32(hw, PFINT_FW_CTL, val);
2996 
2997 	/* enable Mailbox queue Interrupt causes */
2998 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2999 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3000 	wr32(hw, PFINT_MBX_CTL, val);
3001 
3002 	/* This enables Sideband queue Interrupt causes */
3003 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3004 	       PFINT_SB_CTL_CAUSE_ENA_M);
3005 	wr32(hw, PFINT_SB_CTL, val);
3006 
3007 	ice_flush(hw);
3008 }
3009 
3010 /**
3011  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3012  * @pf: board private structure
3013  *
3014  * This sets up the handler for MSIX 0, which is used to manage the
3015  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3016  * when in MSI or Legacy interrupt mode.
3017  */
3018 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3019 {
3020 	struct device *dev = ice_pf_to_dev(pf);
3021 	struct ice_hw *hw = &pf->hw;
3022 	int oicr_idx, err = 0;
3023 
3024 	if (!pf->int_name[0])
3025 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3026 			 dev_driver_string(dev), dev_name(dev));
3027 
3028 	/* Do not request IRQ but do enable OICR interrupt since settings are
3029 	 * lost during reset. Note that this function is called only during
3030 	 * rebuild path and not while reset is in progress.
3031 	 */
3032 	if (ice_is_reset_in_progress(pf->state))
3033 		goto skip_req_irq;
3034 
3035 	/* reserve one vector in irq_tracker for misc interrupts */
3036 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3037 	if (oicr_idx < 0)
3038 		return oicr_idx;
3039 
3040 	pf->num_avail_sw_msix -= 1;
3041 	pf->oicr_idx = (u16)oicr_idx;
3042 
3043 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
3044 			       ice_misc_intr, 0, pf->int_name, pf);
3045 	if (err) {
3046 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3047 			pf->int_name, err);
3048 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3049 		pf->num_avail_sw_msix += 1;
3050 		return err;
3051 	}
3052 
3053 skip_req_irq:
3054 	ice_ena_misc_vector(pf);
3055 
3056 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3057 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3058 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3059 
3060 	ice_flush(hw);
3061 	ice_irq_dynamic_ena(hw, NULL, NULL);
3062 
3063 	return 0;
3064 }
3065 
3066 /**
3067  * ice_napi_add - register NAPI handler for the VSI
3068  * @vsi: VSI for which NAPI handler is to be registered
3069  *
3070  * This function is only called in the driver's load path. Registering the NAPI
3071  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3072  * reset/rebuild, etc.)
3073  */
3074 static void ice_napi_add(struct ice_vsi *vsi)
3075 {
3076 	int v_idx;
3077 
3078 	if (!vsi->netdev)
3079 		return;
3080 
3081 	ice_for_each_q_vector(vsi, v_idx)
3082 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3083 			       ice_napi_poll, NAPI_POLL_WEIGHT);
3084 }
3085 
3086 /**
3087  * ice_set_ops - set netdev and ethtools ops for the given netdev
3088  * @netdev: netdev instance
3089  */
3090 static void ice_set_ops(struct net_device *netdev)
3091 {
3092 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3093 
3094 	if (ice_is_safe_mode(pf)) {
3095 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3096 		ice_set_ethtool_safe_mode_ops(netdev);
3097 		return;
3098 	}
3099 
3100 	netdev->netdev_ops = &ice_netdev_ops;
3101 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3102 	ice_set_ethtool_ops(netdev);
3103 }
3104 
3105 /**
3106  * ice_set_netdev_features - set features for the given netdev
3107  * @netdev: netdev instance
3108  */
3109 static void ice_set_netdev_features(struct net_device *netdev)
3110 {
3111 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3112 	netdev_features_t csumo_features;
3113 	netdev_features_t vlano_features;
3114 	netdev_features_t dflt_features;
3115 	netdev_features_t tso_features;
3116 
3117 	if (ice_is_safe_mode(pf)) {
3118 		/* safe mode */
3119 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3120 		netdev->hw_features = netdev->features;
3121 		return;
3122 	}
3123 
3124 	dflt_features = NETIF_F_SG	|
3125 			NETIF_F_HIGHDMA	|
3126 			NETIF_F_NTUPLE	|
3127 			NETIF_F_RXHASH;
3128 
3129 	csumo_features = NETIF_F_RXCSUM	  |
3130 			 NETIF_F_IP_CSUM  |
3131 			 NETIF_F_SCTP_CRC |
3132 			 NETIF_F_IPV6_CSUM;
3133 
3134 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3135 			 NETIF_F_HW_VLAN_CTAG_TX     |
3136 			 NETIF_F_HW_VLAN_CTAG_RX;
3137 
3138 	tso_features = NETIF_F_TSO			|
3139 		       NETIF_F_TSO_ECN			|
3140 		       NETIF_F_TSO6			|
3141 		       NETIF_F_GSO_GRE			|
3142 		       NETIF_F_GSO_UDP_TUNNEL		|
3143 		       NETIF_F_GSO_GRE_CSUM		|
3144 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3145 		       NETIF_F_GSO_PARTIAL		|
3146 		       NETIF_F_GSO_IPXIP4		|
3147 		       NETIF_F_GSO_IPXIP6		|
3148 		       NETIF_F_GSO_UDP_L4;
3149 
3150 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3151 					NETIF_F_GSO_GRE_CSUM;
3152 	/* set features that user can change */
3153 	netdev->hw_features = dflt_features | csumo_features |
3154 			      vlano_features | tso_features;
3155 
3156 	/* add support for HW_CSUM on packets with MPLS header */
3157 	netdev->mpls_features =  NETIF_F_HW_CSUM;
3158 
3159 	/* enable features */
3160 	netdev->features |= netdev->hw_features;
3161 
3162 	netdev->hw_features |= NETIF_F_HW_TC;
3163 
3164 	/* encap and VLAN devices inherit default, csumo and tso features */
3165 	netdev->hw_enc_features |= dflt_features | csumo_features |
3166 				   tso_features;
3167 	netdev->vlan_features |= dflt_features | csumo_features |
3168 				 tso_features;
3169 }
3170 
3171 /**
3172  * ice_cfg_netdev - Allocate, configure and register a netdev
3173  * @vsi: the VSI associated with the new netdev
3174  *
3175  * Returns 0 on success, negative value on failure
3176  */
3177 static int ice_cfg_netdev(struct ice_vsi *vsi)
3178 {
3179 	struct ice_netdev_priv *np;
3180 	struct net_device *netdev;
3181 	u8 mac_addr[ETH_ALEN];
3182 
3183 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3184 				    vsi->alloc_rxq);
3185 	if (!netdev)
3186 		return -ENOMEM;
3187 
3188 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3189 	vsi->netdev = netdev;
3190 	np = netdev_priv(netdev);
3191 	np->vsi = vsi;
3192 
3193 	ice_set_netdev_features(netdev);
3194 
3195 	ice_set_ops(netdev);
3196 
3197 	if (vsi->type == ICE_VSI_PF) {
3198 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3199 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3200 		eth_hw_addr_set(netdev, mac_addr);
3201 		ether_addr_copy(netdev->perm_addr, mac_addr);
3202 	}
3203 
3204 	netdev->priv_flags |= IFF_UNICAST_FLT;
3205 
3206 	/* Setup netdev TC information */
3207 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3208 
3209 	/* setup watchdog timeout value to be 5 second */
3210 	netdev->watchdog_timeo = 5 * HZ;
3211 
3212 	netdev->min_mtu = ETH_MIN_MTU;
3213 	netdev->max_mtu = ICE_MAX_MTU;
3214 
3215 	return 0;
3216 }
3217 
3218 /**
3219  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3220  * @lut: Lookup table
3221  * @rss_table_size: Lookup table size
3222  * @rss_size: Range of queue number for hashing
3223  */
3224 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3225 {
3226 	u16 i;
3227 
3228 	for (i = 0; i < rss_table_size; i++)
3229 		lut[i] = i % rss_size;
3230 }
3231 
3232 /**
3233  * ice_pf_vsi_setup - Set up a PF VSI
3234  * @pf: board private structure
3235  * @pi: pointer to the port_info instance
3236  *
3237  * Returns pointer to the successfully allocated VSI software struct
3238  * on success, otherwise returns NULL on failure.
3239  */
3240 static struct ice_vsi *
3241 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3242 {
3243 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3244 }
3245 
3246 /**
3247  * ice_ctrl_vsi_setup - Set up a control VSI
3248  * @pf: board private structure
3249  * @pi: pointer to the port_info instance
3250  *
3251  * Returns pointer to the successfully allocated VSI software struct
3252  * on success, otherwise returns NULL on failure.
3253  */
3254 static struct ice_vsi *
3255 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3256 {
3257 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3258 }
3259 
3260 /**
3261  * ice_lb_vsi_setup - Set up a loopback VSI
3262  * @pf: board private structure
3263  * @pi: pointer to the port_info instance
3264  *
3265  * Returns pointer to the successfully allocated VSI software struct
3266  * on success, otherwise returns NULL on failure.
3267  */
3268 struct ice_vsi *
3269 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3270 {
3271 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3272 }
3273 
3274 /**
3275  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3276  * @netdev: network interface to be adjusted
3277  * @proto: unused protocol
3278  * @vid: VLAN ID to be added
3279  *
3280  * net_device_ops implementation for adding VLAN IDs
3281  */
3282 static int
3283 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3284 		    u16 vid)
3285 {
3286 	struct ice_netdev_priv *np = netdev_priv(netdev);
3287 	struct ice_vsi *vsi = np->vsi;
3288 	int ret;
3289 
3290 	/* VLAN 0 is added by default during load/reset */
3291 	if (!vid)
3292 		return 0;
3293 
3294 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3295 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3296 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3297 		if (ret)
3298 			return ret;
3299 	}
3300 
3301 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3302 	 * packets aren't pruned by the device's internal switch on Rx
3303 	 */
3304 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3305 	if (!ret)
3306 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3307 
3308 	return ret;
3309 }
3310 
3311 /**
3312  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3313  * @netdev: network interface to be adjusted
3314  * @proto: unused protocol
3315  * @vid: VLAN ID to be removed
3316  *
3317  * net_device_ops implementation for removing VLAN IDs
3318  */
3319 static int
3320 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3321 		     u16 vid)
3322 {
3323 	struct ice_netdev_priv *np = netdev_priv(netdev);
3324 	struct ice_vsi *vsi = np->vsi;
3325 	int ret;
3326 
3327 	/* don't allow removal of VLAN 0 */
3328 	if (!vid)
3329 		return 0;
3330 
3331 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3332 	 * information
3333 	 */
3334 	ret = ice_vsi_kill_vlan(vsi, vid);
3335 	if (ret)
3336 		return ret;
3337 
3338 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3339 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3340 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3341 
3342 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3343 	return ret;
3344 }
3345 
3346 /**
3347  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3348  * @pf: board private structure
3349  *
3350  * Returns 0 on success, negative value on failure
3351  */
3352 static int ice_setup_pf_sw(struct ice_pf *pf)
3353 {
3354 	struct ice_vsi *vsi;
3355 	int status = 0;
3356 
3357 	if (ice_is_reset_in_progress(pf->state))
3358 		return -EBUSY;
3359 
3360 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3361 	if (!vsi)
3362 		return -ENOMEM;
3363 
3364 	status = ice_cfg_netdev(vsi);
3365 	if (status) {
3366 		status = -ENODEV;
3367 		goto unroll_vsi_setup;
3368 	}
3369 	/* netdev has to be configured before setting frame size */
3370 	ice_vsi_cfg_frame_size(vsi);
3371 
3372 	/* Setup DCB netlink interface */
3373 	ice_dcbnl_setup(vsi);
3374 
3375 	/* registering the NAPI handler requires both the queues and
3376 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3377 	 * and ice_cfg_netdev() respectively
3378 	 */
3379 	ice_napi_add(vsi);
3380 
3381 	status = ice_set_cpu_rx_rmap(vsi);
3382 	if (status) {
3383 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3384 			vsi->vsi_num, status);
3385 		status = -EINVAL;
3386 		goto unroll_napi_add;
3387 	}
3388 	status = ice_init_mac_fltr(pf);
3389 	if (status)
3390 		goto free_cpu_rx_map;
3391 
3392 	return status;
3393 
3394 free_cpu_rx_map:
3395 	ice_free_cpu_rx_rmap(vsi);
3396 
3397 unroll_napi_add:
3398 	if (vsi) {
3399 		ice_napi_del(vsi);
3400 		if (vsi->netdev) {
3401 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3402 			free_netdev(vsi->netdev);
3403 			vsi->netdev = NULL;
3404 		}
3405 	}
3406 
3407 unroll_vsi_setup:
3408 	ice_vsi_release(vsi);
3409 	return status;
3410 }
3411 
3412 /**
3413  * ice_get_avail_q_count - Get count of queues in use
3414  * @pf_qmap: bitmap to get queue use count from
3415  * @lock: pointer to a mutex that protects access to pf_qmap
3416  * @size: size of the bitmap
3417  */
3418 static u16
3419 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3420 {
3421 	unsigned long bit;
3422 	u16 count = 0;
3423 
3424 	mutex_lock(lock);
3425 	for_each_clear_bit(bit, pf_qmap, size)
3426 		count++;
3427 	mutex_unlock(lock);
3428 
3429 	return count;
3430 }
3431 
3432 /**
3433  * ice_get_avail_txq_count - Get count of Tx queues in use
3434  * @pf: pointer to an ice_pf instance
3435  */
3436 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3437 {
3438 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3439 				     pf->max_pf_txqs);
3440 }
3441 
3442 /**
3443  * ice_get_avail_rxq_count - Get count of Rx queues in use
3444  * @pf: pointer to an ice_pf instance
3445  */
3446 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3447 {
3448 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3449 				     pf->max_pf_rxqs);
3450 }
3451 
3452 /**
3453  * ice_deinit_pf - Unrolls initialziations done by ice_init_pf
3454  * @pf: board private structure to initialize
3455  */
3456 static void ice_deinit_pf(struct ice_pf *pf)
3457 {
3458 	ice_service_task_stop(pf);
3459 	mutex_destroy(&pf->sw_mutex);
3460 	mutex_destroy(&pf->tc_mutex);
3461 	mutex_destroy(&pf->avail_q_mutex);
3462 
3463 	if (pf->avail_txqs) {
3464 		bitmap_free(pf->avail_txqs);
3465 		pf->avail_txqs = NULL;
3466 	}
3467 
3468 	if (pf->avail_rxqs) {
3469 		bitmap_free(pf->avail_rxqs);
3470 		pf->avail_rxqs = NULL;
3471 	}
3472 
3473 	if (pf->ptp.clock)
3474 		ptp_clock_unregister(pf->ptp.clock);
3475 }
3476 
3477 /**
3478  * ice_set_pf_caps - set PFs capability flags
3479  * @pf: pointer to the PF instance
3480  */
3481 static void ice_set_pf_caps(struct ice_pf *pf)
3482 {
3483 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3484 
3485 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3486 	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3487 	if (func_caps->common_cap.rdma) {
3488 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3489 		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3490 	}
3491 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3492 	if (func_caps->common_cap.dcb)
3493 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3494 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3495 	if (func_caps->common_cap.sr_iov_1_1) {
3496 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3497 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3498 					      ICE_MAX_VF_COUNT);
3499 	}
3500 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3501 	if (func_caps->common_cap.rss_table_size)
3502 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3503 
3504 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3505 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3506 		u16 unused;
3507 
3508 		/* ctrl_vsi_idx will be set to a valid value when flow director
3509 		 * is setup by ice_init_fdir
3510 		 */
3511 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3512 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3513 		/* force guaranteed filter pool for PF */
3514 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3515 				       func_caps->fd_fltr_guar);
3516 		/* force shared filter pool for PF */
3517 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3518 				       func_caps->fd_fltr_best_effort);
3519 	}
3520 
3521 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3522 	if (func_caps->common_cap.ieee_1588)
3523 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3524 
3525 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3526 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3527 }
3528 
3529 /**
3530  * ice_init_pf - Initialize general software structures (struct ice_pf)
3531  * @pf: board private structure to initialize
3532  */
3533 static int ice_init_pf(struct ice_pf *pf)
3534 {
3535 	ice_set_pf_caps(pf);
3536 
3537 	mutex_init(&pf->sw_mutex);
3538 	mutex_init(&pf->tc_mutex);
3539 
3540 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3541 	spin_lock_init(&pf->aq_wait_lock);
3542 	init_waitqueue_head(&pf->aq_wait_queue);
3543 
3544 	init_waitqueue_head(&pf->reset_wait_queue);
3545 
3546 	/* setup service timer and periodic service task */
3547 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3548 	pf->serv_tmr_period = HZ;
3549 	INIT_WORK(&pf->serv_task, ice_service_task);
3550 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3551 
3552 	mutex_init(&pf->avail_q_mutex);
3553 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3554 	if (!pf->avail_txqs)
3555 		return -ENOMEM;
3556 
3557 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3558 	if (!pf->avail_rxqs) {
3559 		devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
3560 		pf->avail_txqs = NULL;
3561 		return -ENOMEM;
3562 	}
3563 
3564 	return 0;
3565 }
3566 
3567 /**
3568  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3569  * @pf: board private structure
3570  *
3571  * compute the number of MSIX vectors required (v_budget) and request from
3572  * the OS. Return the number of vectors reserved or negative on failure
3573  */
3574 static int ice_ena_msix_range(struct ice_pf *pf)
3575 {
3576 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3577 	struct device *dev = ice_pf_to_dev(pf);
3578 	int needed, err, i;
3579 
3580 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3581 	num_cpus = num_online_cpus();
3582 
3583 	/* reserve for LAN miscellaneous handler */
3584 	needed = ICE_MIN_LAN_OICR_MSIX;
3585 	if (v_left < needed)
3586 		goto no_hw_vecs_left_err;
3587 	v_budget += needed;
3588 	v_left -= needed;
3589 
3590 	/* reserve for flow director */
3591 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3592 		needed = ICE_FDIR_MSIX;
3593 		if (v_left < needed)
3594 			goto no_hw_vecs_left_err;
3595 		v_budget += needed;
3596 		v_left -= needed;
3597 	}
3598 
3599 	/* reserve for switchdev */
3600 	needed = ICE_ESWITCH_MSIX;
3601 	if (v_left < needed)
3602 		goto no_hw_vecs_left_err;
3603 	v_budget += needed;
3604 	v_left -= needed;
3605 
3606 	/* total used for non-traffic vectors */
3607 	v_other = v_budget;
3608 
3609 	/* reserve vectors for LAN traffic */
3610 	needed = num_cpus;
3611 	if (v_left < needed)
3612 		goto no_hw_vecs_left_err;
3613 	pf->num_lan_msix = needed;
3614 	v_budget += needed;
3615 	v_left -= needed;
3616 
3617 	/* reserve vectors for RDMA auxiliary driver */
3618 	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3619 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3620 		if (v_left < needed)
3621 			goto no_hw_vecs_left_err;
3622 		pf->num_rdma_msix = needed;
3623 		v_budget += needed;
3624 		v_left -= needed;
3625 	}
3626 
3627 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3628 					sizeof(*pf->msix_entries), GFP_KERNEL);
3629 	if (!pf->msix_entries) {
3630 		err = -ENOMEM;
3631 		goto exit_err;
3632 	}
3633 
3634 	for (i = 0; i < v_budget; i++)
3635 		pf->msix_entries[i].entry = i;
3636 
3637 	/* actually reserve the vectors */
3638 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3639 					 ICE_MIN_MSIX, v_budget);
3640 	if (v_actual < 0) {
3641 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3642 		err = v_actual;
3643 		goto msix_err;
3644 	}
3645 
3646 	if (v_actual < v_budget) {
3647 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3648 			 v_budget, v_actual);
3649 
3650 		if (v_actual < ICE_MIN_MSIX) {
3651 			/* error if we can't get minimum vectors */
3652 			pci_disable_msix(pf->pdev);
3653 			err = -ERANGE;
3654 			goto msix_err;
3655 		} else {
3656 			int v_remain = v_actual - v_other;
3657 			int v_rdma = 0, v_min_rdma = 0;
3658 
3659 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3660 				/* Need at least 1 interrupt in addition to
3661 				 * AEQ MSIX
3662 				 */
3663 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3664 				v_min_rdma = ICE_MIN_RDMA_MSIX;
3665 			}
3666 
3667 			if (v_actual == ICE_MIN_MSIX ||
3668 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3669 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3670 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3671 
3672 				pf->num_rdma_msix = 0;
3673 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3674 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3675 				   (v_remain - v_rdma < v_rdma)) {
3676 				/* Support minimum RDMA and give remaining
3677 				 * vectors to LAN MSIX
3678 				 */
3679 				pf->num_rdma_msix = v_min_rdma;
3680 				pf->num_lan_msix = v_remain - v_min_rdma;
3681 			} else {
3682 				/* Split remaining MSIX with RDMA after
3683 				 * accounting for AEQ MSIX
3684 				 */
3685 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3686 						    ICE_RDMA_NUM_AEQ_MSIX;
3687 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3688 			}
3689 
3690 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3691 				   pf->num_lan_msix);
3692 
3693 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3694 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3695 					   pf->num_rdma_msix);
3696 		}
3697 	}
3698 
3699 	return v_actual;
3700 
3701 msix_err:
3702 	devm_kfree(dev, pf->msix_entries);
3703 	goto exit_err;
3704 
3705 no_hw_vecs_left_err:
3706 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3707 		needed, v_left);
3708 	err = -ERANGE;
3709 exit_err:
3710 	pf->num_rdma_msix = 0;
3711 	pf->num_lan_msix = 0;
3712 	return err;
3713 }
3714 
3715 /**
3716  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3717  * @pf: board private structure
3718  */
3719 static void ice_dis_msix(struct ice_pf *pf)
3720 {
3721 	pci_disable_msix(pf->pdev);
3722 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3723 	pf->msix_entries = NULL;
3724 }
3725 
3726 /**
3727  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3728  * @pf: board private structure
3729  */
3730 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3731 {
3732 	ice_dis_msix(pf);
3733 
3734 	if (pf->irq_tracker) {
3735 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3736 		pf->irq_tracker = NULL;
3737 	}
3738 }
3739 
3740 /**
3741  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3742  * @pf: board private structure to initialize
3743  */
3744 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3745 {
3746 	int vectors;
3747 
3748 	vectors = ice_ena_msix_range(pf);
3749 
3750 	if (vectors < 0)
3751 		return vectors;
3752 
3753 	/* set up vector assignment tracking */
3754 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3755 				       struct_size(pf->irq_tracker, list, vectors),
3756 				       GFP_KERNEL);
3757 	if (!pf->irq_tracker) {
3758 		ice_dis_msix(pf);
3759 		return -ENOMEM;
3760 	}
3761 
3762 	/* populate SW interrupts pool with number of OS granted IRQs. */
3763 	pf->num_avail_sw_msix = (u16)vectors;
3764 	pf->irq_tracker->num_entries = (u16)vectors;
3765 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3766 
3767 	return 0;
3768 }
3769 
3770 /**
3771  * ice_is_wol_supported - check if WoL is supported
3772  * @hw: pointer to hardware info
3773  *
3774  * Check if WoL is supported based on the HW configuration.
3775  * Returns true if NVM supports and enables WoL for this port, false otherwise
3776  */
3777 bool ice_is_wol_supported(struct ice_hw *hw)
3778 {
3779 	u16 wol_ctrl;
3780 
3781 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3782 	 * word) indicates WoL is not supported on the corresponding PF ID.
3783 	 */
3784 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3785 		return false;
3786 
3787 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3788 }
3789 
3790 /**
3791  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3792  * @vsi: VSI being changed
3793  * @new_rx: new number of Rx queues
3794  * @new_tx: new number of Tx queues
3795  *
3796  * Only change the number of queues if new_tx, or new_rx is non-0.
3797  *
3798  * Returns 0 on success.
3799  */
3800 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3801 {
3802 	struct ice_pf *pf = vsi->back;
3803 	int err = 0, timeout = 50;
3804 
3805 	if (!new_rx && !new_tx)
3806 		return -EINVAL;
3807 
3808 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3809 		timeout--;
3810 		if (!timeout)
3811 			return -EBUSY;
3812 		usleep_range(1000, 2000);
3813 	}
3814 
3815 	if (new_tx)
3816 		vsi->req_txq = (u16)new_tx;
3817 	if (new_rx)
3818 		vsi->req_rxq = (u16)new_rx;
3819 
3820 	/* set for the next time the netdev is started */
3821 	if (!netif_running(vsi->netdev)) {
3822 		ice_vsi_rebuild(vsi, false);
3823 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3824 		goto done;
3825 	}
3826 
3827 	ice_vsi_close(vsi);
3828 	ice_vsi_rebuild(vsi, false);
3829 	ice_pf_dcb_recfg(pf);
3830 	ice_vsi_open(vsi);
3831 done:
3832 	clear_bit(ICE_CFG_BUSY, pf->state);
3833 	return err;
3834 }
3835 
3836 /**
3837  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3838  * @pf: PF to configure
3839  *
3840  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3841  * VSI can still Tx/Rx VLAN tagged packets.
3842  */
3843 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3844 {
3845 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3846 	struct ice_vsi_ctx *ctxt;
3847 	enum ice_status status;
3848 	struct ice_hw *hw;
3849 
3850 	if (!vsi)
3851 		return;
3852 
3853 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3854 	if (!ctxt)
3855 		return;
3856 
3857 	hw = &pf->hw;
3858 	ctxt->info = vsi->info;
3859 
3860 	ctxt->info.valid_sections =
3861 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3862 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3863 			    ICE_AQ_VSI_PROP_SW_VALID);
3864 
3865 	/* disable VLAN anti-spoof */
3866 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3867 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3868 
3869 	/* disable VLAN pruning and keep all other settings */
3870 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3871 
3872 	/* allow all VLANs on Tx and don't strip on Rx */
3873 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3874 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3875 
3876 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3877 	if (status) {
3878 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3879 			ice_stat_str(status),
3880 			ice_aq_str(hw->adminq.sq_last_status));
3881 	} else {
3882 		vsi->info.sec_flags = ctxt->info.sec_flags;
3883 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3884 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3885 	}
3886 
3887 	kfree(ctxt);
3888 }
3889 
3890 /**
3891  * ice_log_pkg_init - log result of DDP package load
3892  * @hw: pointer to hardware info
3893  * @status: status of package load
3894  */
3895 static void
3896 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3897 {
3898 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3899 	struct device *dev = ice_pf_to_dev(pf);
3900 
3901 	switch (*status) {
3902 	case ICE_SUCCESS:
3903 		/* The package download AdminQ command returned success because
3904 		 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
3905 		 * already a package loaded on the device.
3906 		 */
3907 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3908 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3909 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3910 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3911 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3912 			    sizeof(hw->pkg_name))) {
3913 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3914 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3915 					 hw->active_pkg_name,
3916 					 hw->active_pkg_ver.major,
3917 					 hw->active_pkg_ver.minor,
3918 					 hw->active_pkg_ver.update,
3919 					 hw->active_pkg_ver.draft);
3920 			else
3921 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3922 					 hw->active_pkg_name,
3923 					 hw->active_pkg_ver.major,
3924 					 hw->active_pkg_ver.minor,
3925 					 hw->active_pkg_ver.update,
3926 					 hw->active_pkg_ver.draft);
3927 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3928 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3929 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3930 				hw->active_pkg_name,
3931 				hw->active_pkg_ver.major,
3932 				hw->active_pkg_ver.minor,
3933 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3934 			*status = ICE_ERR_NOT_SUPPORTED;
3935 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3936 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3937 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3938 				 hw->active_pkg_name,
3939 				 hw->active_pkg_ver.major,
3940 				 hw->active_pkg_ver.minor,
3941 				 hw->active_pkg_ver.update,
3942 				 hw->active_pkg_ver.draft,
3943 				 hw->pkg_name,
3944 				 hw->pkg_ver.major,
3945 				 hw->pkg_ver.minor,
3946 				 hw->pkg_ver.update,
3947 				 hw->pkg_ver.draft);
3948 		} else {
3949 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3950 			*status = ICE_ERR_NOT_SUPPORTED;
3951 		}
3952 		break;
3953 	case ICE_ERR_FW_DDP_MISMATCH:
3954 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3955 		break;
3956 	case ICE_ERR_BUF_TOO_SHORT:
3957 	case ICE_ERR_CFG:
3958 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3959 		break;
3960 	case ICE_ERR_NOT_SUPPORTED:
3961 		/* Package File version not supported */
3962 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3963 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3964 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3965 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3966 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3967 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3968 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3969 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3970 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3971 		break;
3972 	case ICE_ERR_AQ_ERROR:
3973 		switch (hw->pkg_dwnld_status) {
3974 		case ICE_AQ_RC_ENOSEC:
3975 		case ICE_AQ_RC_EBADSIG:
3976 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3977 			return;
3978 		case ICE_AQ_RC_ESVN:
3979 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3980 			return;
3981 		case ICE_AQ_RC_EBADMAN:
3982 		case ICE_AQ_RC_EBADBUF:
3983 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3984 			/* poll for reset to complete */
3985 			if (ice_check_reset(hw))
3986 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3987 			return;
3988 		default:
3989 			break;
3990 		}
3991 		fallthrough;
3992 	default:
3993 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3994 			*status);
3995 		break;
3996 	}
3997 }
3998 
3999 /**
4000  * ice_load_pkg - load/reload the DDP Package file
4001  * @firmware: firmware structure when firmware requested or NULL for reload
4002  * @pf: pointer to the PF instance
4003  *
4004  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4005  * initialize HW tables.
4006  */
4007 static void
4008 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4009 {
4010 	enum ice_status status = ICE_ERR_PARAM;
4011 	struct device *dev = ice_pf_to_dev(pf);
4012 	struct ice_hw *hw = &pf->hw;
4013 
4014 	/* Load DDP Package */
4015 	if (firmware && !hw->pkg_copy) {
4016 		status = ice_copy_and_init_pkg(hw, firmware->data,
4017 					       firmware->size);
4018 		ice_log_pkg_init(hw, &status);
4019 	} else if (!firmware && hw->pkg_copy) {
4020 		/* Reload package during rebuild after CORER/GLOBR reset */
4021 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4022 		ice_log_pkg_init(hw, &status);
4023 	} else {
4024 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4025 	}
4026 
4027 	if (status) {
4028 		/* Safe Mode */
4029 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4030 		return;
4031 	}
4032 
4033 	/* Successful download package is the precondition for advanced
4034 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4035 	 */
4036 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4037 }
4038 
4039 /**
4040  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4041  * @pf: pointer to the PF structure
4042  *
4043  * There is no error returned here because the driver should be able to handle
4044  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4045  * specifically with Tx.
4046  */
4047 static void ice_verify_cacheline_size(struct ice_pf *pf)
4048 {
4049 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4050 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4051 			 ICE_CACHE_LINE_BYTES);
4052 }
4053 
4054 /**
4055  * ice_send_version - update firmware with driver version
4056  * @pf: PF struct
4057  *
4058  * Returns ICE_SUCCESS on success, else error code
4059  */
4060 static enum ice_status ice_send_version(struct ice_pf *pf)
4061 {
4062 	struct ice_driver_ver dv;
4063 
4064 	dv.major_ver = 0xff;
4065 	dv.minor_ver = 0xff;
4066 	dv.build_ver = 0xff;
4067 	dv.subbuild_ver = 0;
4068 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4069 		sizeof(dv.driver_string));
4070 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4071 }
4072 
4073 /**
4074  * ice_init_fdir - Initialize flow director VSI and configuration
4075  * @pf: pointer to the PF instance
4076  *
4077  * returns 0 on success, negative on error
4078  */
4079 static int ice_init_fdir(struct ice_pf *pf)
4080 {
4081 	struct device *dev = ice_pf_to_dev(pf);
4082 	struct ice_vsi *ctrl_vsi;
4083 	int err;
4084 
4085 	/* Side Band Flow Director needs to have a control VSI.
4086 	 * Allocate it and store it in the PF.
4087 	 */
4088 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4089 	if (!ctrl_vsi) {
4090 		dev_dbg(dev, "could not create control VSI\n");
4091 		return -ENOMEM;
4092 	}
4093 
4094 	err = ice_vsi_open_ctrl(ctrl_vsi);
4095 	if (err) {
4096 		dev_dbg(dev, "could not open control VSI\n");
4097 		goto err_vsi_open;
4098 	}
4099 
4100 	mutex_init(&pf->hw.fdir_fltr_lock);
4101 
4102 	err = ice_fdir_create_dflt_rules(pf);
4103 	if (err)
4104 		goto err_fdir_rule;
4105 
4106 	return 0;
4107 
4108 err_fdir_rule:
4109 	ice_fdir_release_flows(&pf->hw);
4110 	ice_vsi_close(ctrl_vsi);
4111 err_vsi_open:
4112 	ice_vsi_release(ctrl_vsi);
4113 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4114 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4115 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4116 	}
4117 	return err;
4118 }
4119 
4120 /**
4121  * ice_get_opt_fw_name - return optional firmware file name or NULL
4122  * @pf: pointer to the PF instance
4123  */
4124 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4125 {
4126 	/* Optional firmware name same as default with additional dash
4127 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
4128 	 */
4129 	struct pci_dev *pdev = pf->pdev;
4130 	char *opt_fw_filename;
4131 	u64 dsn;
4132 
4133 	/* Determine the name of the optional file using the DSN (two
4134 	 * dwords following the start of the DSN Capability).
4135 	 */
4136 	dsn = pci_get_dsn(pdev);
4137 	if (!dsn)
4138 		return NULL;
4139 
4140 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4141 	if (!opt_fw_filename)
4142 		return NULL;
4143 
4144 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4145 		 ICE_DDP_PKG_PATH, dsn);
4146 
4147 	return opt_fw_filename;
4148 }
4149 
4150 /**
4151  * ice_request_fw - Device initialization routine
4152  * @pf: pointer to the PF instance
4153  */
4154 static void ice_request_fw(struct ice_pf *pf)
4155 {
4156 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4157 	const struct firmware *firmware = NULL;
4158 	struct device *dev = ice_pf_to_dev(pf);
4159 	int err = 0;
4160 
4161 	/* optional device-specific DDP (if present) overrides the default DDP
4162 	 * package file. kernel logs a debug message if the file doesn't exist,
4163 	 * and warning messages for other errors.
4164 	 */
4165 	if (opt_fw_filename) {
4166 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4167 		if (err) {
4168 			kfree(opt_fw_filename);
4169 			goto dflt_pkg_load;
4170 		}
4171 
4172 		/* request for firmware was successful. Download to device */
4173 		ice_load_pkg(firmware, pf);
4174 		kfree(opt_fw_filename);
4175 		release_firmware(firmware);
4176 		return;
4177 	}
4178 
4179 dflt_pkg_load:
4180 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4181 	if (err) {
4182 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4183 		return;
4184 	}
4185 
4186 	/* request for firmware was successful. Download to device */
4187 	ice_load_pkg(firmware, pf);
4188 	release_firmware(firmware);
4189 }
4190 
4191 /**
4192  * ice_print_wake_reason - show the wake up cause in the log
4193  * @pf: pointer to the PF struct
4194  */
4195 static void ice_print_wake_reason(struct ice_pf *pf)
4196 {
4197 	u32 wus = pf->wakeup_reason;
4198 	const char *wake_str;
4199 
4200 	/* if no wake event, nothing to print */
4201 	if (!wus)
4202 		return;
4203 
4204 	if (wus & PFPM_WUS_LNKC_M)
4205 		wake_str = "Link\n";
4206 	else if (wus & PFPM_WUS_MAG_M)
4207 		wake_str = "Magic Packet\n";
4208 	else if (wus & PFPM_WUS_MNG_M)
4209 		wake_str = "Management\n";
4210 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4211 		wake_str = "Firmware Reset\n";
4212 	else
4213 		wake_str = "Unknown\n";
4214 
4215 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4216 }
4217 
4218 /**
4219  * ice_register_netdev - register netdev and devlink port
4220  * @pf: pointer to the PF struct
4221  */
4222 static int ice_register_netdev(struct ice_pf *pf)
4223 {
4224 	struct ice_vsi *vsi;
4225 	int err = 0;
4226 
4227 	vsi = ice_get_main_vsi(pf);
4228 	if (!vsi || !vsi->netdev)
4229 		return -EIO;
4230 
4231 	err = register_netdev(vsi->netdev);
4232 	if (err)
4233 		goto err_register_netdev;
4234 
4235 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4236 	netif_carrier_off(vsi->netdev);
4237 	netif_tx_stop_all_queues(vsi->netdev);
4238 	err = ice_devlink_create_pf_port(pf);
4239 	if (err)
4240 		goto err_devlink_create;
4241 
4242 	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
4243 
4244 	return 0;
4245 err_devlink_create:
4246 	unregister_netdev(vsi->netdev);
4247 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4248 err_register_netdev:
4249 	free_netdev(vsi->netdev);
4250 	vsi->netdev = NULL;
4251 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4252 	return err;
4253 }
4254 
4255 /**
4256  * ice_probe - Device initialization routine
4257  * @pdev: PCI device information struct
4258  * @ent: entry in ice_pci_tbl
4259  *
4260  * Returns 0 on success, negative on failure
4261  */
4262 static int
4263 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4264 {
4265 	struct device *dev = &pdev->dev;
4266 	struct ice_pf *pf;
4267 	struct ice_hw *hw;
4268 	int i, err;
4269 
4270 	if (pdev->is_virtfn) {
4271 		dev_err(dev, "can't probe a virtual function\n");
4272 		return -EINVAL;
4273 	}
4274 
4275 	/* this driver uses devres, see
4276 	 * Documentation/driver-api/driver-model/devres.rst
4277 	 */
4278 	err = pcim_enable_device(pdev);
4279 	if (err)
4280 		return err;
4281 
4282 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4283 	if (err) {
4284 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4285 		return err;
4286 	}
4287 
4288 	pf = ice_allocate_pf(dev);
4289 	if (!pf)
4290 		return -ENOMEM;
4291 
4292 	/* set up for high or low DMA */
4293 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4294 	if (err)
4295 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4296 	if (err) {
4297 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4298 		return err;
4299 	}
4300 
4301 	pci_enable_pcie_error_reporting(pdev);
4302 	pci_set_master(pdev);
4303 
4304 	pf->pdev = pdev;
4305 	pci_set_drvdata(pdev, pf);
4306 	set_bit(ICE_DOWN, pf->state);
4307 	/* Disable service task until DOWN bit is cleared */
4308 	set_bit(ICE_SERVICE_DIS, pf->state);
4309 
4310 	hw = &pf->hw;
4311 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4312 	pci_save_state(pdev);
4313 
4314 	hw->back = pf;
4315 	hw->vendor_id = pdev->vendor;
4316 	hw->device_id = pdev->device;
4317 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4318 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4319 	hw->subsystem_device_id = pdev->subsystem_device;
4320 	hw->bus.device = PCI_SLOT(pdev->devfn);
4321 	hw->bus.func = PCI_FUNC(pdev->devfn);
4322 	ice_set_ctrlq_len(hw);
4323 
4324 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4325 
4326 #ifndef CONFIG_DYNAMIC_DEBUG
4327 	if (debug < -1)
4328 		hw->debug_mask = debug;
4329 #endif
4330 
4331 	err = ice_init_hw(hw);
4332 	if (err) {
4333 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4334 		err = -EIO;
4335 		goto err_exit_unroll;
4336 	}
4337 
4338 	ice_init_feature_support(pf);
4339 
4340 	ice_request_fw(pf);
4341 
4342 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4343 	 * set in pf->state, which will cause ice_is_safe_mode to return
4344 	 * true
4345 	 */
4346 	if (ice_is_safe_mode(pf)) {
4347 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4348 		/* we already got function/device capabilities but these don't
4349 		 * reflect what the driver needs to do in safe mode. Instead of
4350 		 * adding conditional logic everywhere to ignore these
4351 		 * device/function capabilities, override them.
4352 		 */
4353 		ice_set_safe_mode_caps(hw);
4354 	}
4355 
4356 	err = ice_init_pf(pf);
4357 	if (err) {
4358 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4359 		goto err_init_pf_unroll;
4360 	}
4361 
4362 	ice_devlink_init_regions(pf);
4363 
4364 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4365 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4366 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4367 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4368 	i = 0;
4369 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4370 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4371 			pf->hw.tnl.valid_count[TNL_VXLAN];
4372 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4373 			UDP_TUNNEL_TYPE_VXLAN;
4374 		i++;
4375 	}
4376 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4377 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4378 			pf->hw.tnl.valid_count[TNL_GENEVE];
4379 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4380 			UDP_TUNNEL_TYPE_GENEVE;
4381 		i++;
4382 	}
4383 
4384 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4385 	if (!pf->num_alloc_vsi) {
4386 		err = -EIO;
4387 		goto err_init_pf_unroll;
4388 	}
4389 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4390 		dev_warn(&pf->pdev->dev,
4391 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4392 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4393 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4394 	}
4395 
4396 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4397 			       GFP_KERNEL);
4398 	if (!pf->vsi) {
4399 		err = -ENOMEM;
4400 		goto err_init_pf_unroll;
4401 	}
4402 
4403 	err = ice_init_interrupt_scheme(pf);
4404 	if (err) {
4405 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4406 		err = -EIO;
4407 		goto err_init_vsi_unroll;
4408 	}
4409 
4410 	/* In case of MSIX we are going to setup the misc vector right here
4411 	 * to handle admin queue events etc. In case of legacy and MSI
4412 	 * the misc functionality and queue processing is combined in
4413 	 * the same vector and that gets setup at open.
4414 	 */
4415 	err = ice_req_irq_msix_misc(pf);
4416 	if (err) {
4417 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4418 		goto err_init_interrupt_unroll;
4419 	}
4420 
4421 	/* create switch struct for the switch element created by FW on boot */
4422 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4423 	if (!pf->first_sw) {
4424 		err = -ENOMEM;
4425 		goto err_msix_misc_unroll;
4426 	}
4427 
4428 	if (hw->evb_veb)
4429 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4430 	else
4431 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4432 
4433 	pf->first_sw->pf = pf;
4434 
4435 	/* record the sw_id available for later use */
4436 	pf->first_sw->sw_id = hw->port_info->sw_id;
4437 
4438 	err = ice_setup_pf_sw(pf);
4439 	if (err) {
4440 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4441 		goto err_alloc_sw_unroll;
4442 	}
4443 
4444 	clear_bit(ICE_SERVICE_DIS, pf->state);
4445 
4446 	/* tell the firmware we are up */
4447 	err = ice_send_version(pf);
4448 	if (err) {
4449 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4450 			UTS_RELEASE, err);
4451 		goto err_send_version_unroll;
4452 	}
4453 
4454 	/* since everything is good, start the service timer */
4455 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4456 
4457 	err = ice_init_link_events(pf->hw.port_info);
4458 	if (err) {
4459 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4460 		goto err_send_version_unroll;
4461 	}
4462 
4463 	/* not a fatal error if this fails */
4464 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4465 	if (err)
4466 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4467 
4468 	/* not a fatal error if this fails */
4469 	err = ice_update_link_info(pf->hw.port_info);
4470 	if (err)
4471 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4472 
4473 	ice_init_link_dflt_override(pf->hw.port_info);
4474 
4475 	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
4476 
4477 	/* if media available, initialize PHY settings */
4478 	if (pf->hw.port_info->phy.link_info.link_info &
4479 	    ICE_AQ_MEDIA_AVAILABLE) {
4480 		/* not a fatal error if this fails */
4481 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4482 		if (err)
4483 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4484 
4485 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4486 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4487 
4488 			if (vsi)
4489 				ice_configure_phy(vsi);
4490 		}
4491 	} else {
4492 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4493 	}
4494 
4495 	ice_verify_cacheline_size(pf);
4496 
4497 	/* Save wakeup reason register for later use */
4498 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4499 
4500 	/* check for a power management event */
4501 	ice_print_wake_reason(pf);
4502 
4503 	/* clear wake status, all bits */
4504 	wr32(hw, PFPM_WUS, U32_MAX);
4505 
4506 	/* Disable WoL at init, wait for user to enable */
4507 	device_set_wakeup_enable(dev, false);
4508 
4509 	if (ice_is_safe_mode(pf)) {
4510 		ice_set_safe_mode_vlan_cfg(pf);
4511 		goto probe_done;
4512 	}
4513 
4514 	/* initialize DDP driven features */
4515 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4516 		ice_ptp_init(pf);
4517 
4518 	/* Note: Flow director init failure is non-fatal to load */
4519 	if (ice_init_fdir(pf))
4520 		dev_err(dev, "could not initialize flow director\n");
4521 
4522 	/* Note: DCB init failure is non-fatal to load */
4523 	if (ice_init_pf_dcb(pf, false)) {
4524 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4525 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4526 	} else {
4527 		ice_cfg_lldp_mib_change(&pf->hw, true);
4528 	}
4529 
4530 	if (ice_init_lag(pf))
4531 		dev_warn(dev, "Failed to init link aggregation support\n");
4532 
4533 	/* print PCI link speed and width */
4534 	pcie_print_link_status(pf->pdev);
4535 
4536 probe_done:
4537 	err = ice_register_netdev(pf);
4538 	if (err)
4539 		goto err_netdev_reg;
4540 
4541 	/* ready to go, so clear down state bit */
4542 	clear_bit(ICE_DOWN, pf->state);
4543 	if (ice_is_aux_ena(pf)) {
4544 		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4545 		if (pf->aux_idx < 0) {
4546 			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4547 			err = -ENOMEM;
4548 			goto err_netdev_reg;
4549 		}
4550 
4551 		err = ice_init_rdma(pf);
4552 		if (err) {
4553 			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4554 			err = -EIO;
4555 			goto err_init_aux_unroll;
4556 		}
4557 	} else {
4558 		dev_warn(dev, "RDMA is not supported on this device\n");
4559 	}
4560 
4561 	ice_devlink_register(pf);
4562 	return 0;
4563 
4564 err_init_aux_unroll:
4565 	pf->adev = NULL;
4566 	ida_free(&ice_aux_ida, pf->aux_idx);
4567 err_netdev_reg:
4568 err_send_version_unroll:
4569 	ice_vsi_release_all(pf);
4570 err_alloc_sw_unroll:
4571 	set_bit(ICE_SERVICE_DIS, pf->state);
4572 	set_bit(ICE_DOWN, pf->state);
4573 	devm_kfree(dev, pf->first_sw);
4574 err_msix_misc_unroll:
4575 	ice_free_irq_msix_misc(pf);
4576 err_init_interrupt_unroll:
4577 	ice_clear_interrupt_scheme(pf);
4578 err_init_vsi_unroll:
4579 	devm_kfree(dev, pf->vsi);
4580 err_init_pf_unroll:
4581 	ice_deinit_pf(pf);
4582 	ice_devlink_destroy_regions(pf);
4583 	ice_deinit_hw(hw);
4584 err_exit_unroll:
4585 	pci_disable_pcie_error_reporting(pdev);
4586 	pci_disable_device(pdev);
4587 	return err;
4588 }
4589 
4590 /**
4591  * ice_set_wake - enable or disable Wake on LAN
4592  * @pf: pointer to the PF struct
4593  *
4594  * Simple helper for WoL control
4595  */
4596 static void ice_set_wake(struct ice_pf *pf)
4597 {
4598 	struct ice_hw *hw = &pf->hw;
4599 	bool wol = pf->wol_ena;
4600 
4601 	/* clear wake state, otherwise new wake events won't fire */
4602 	wr32(hw, PFPM_WUS, U32_MAX);
4603 
4604 	/* enable / disable APM wake up, no RMW needed */
4605 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4606 
4607 	/* set magic packet filter enabled */
4608 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4609 }
4610 
4611 /**
4612  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4613  * @pf: pointer to the PF struct
4614  *
4615  * Issue firmware command to enable multicast magic wake, making
4616  * sure that any locally administered address (LAA) is used for
4617  * wake, and that PF reset doesn't undo the LAA.
4618  */
4619 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4620 {
4621 	struct device *dev = ice_pf_to_dev(pf);
4622 	struct ice_hw *hw = &pf->hw;
4623 	enum ice_status status;
4624 	u8 mac_addr[ETH_ALEN];
4625 	struct ice_vsi *vsi;
4626 	u8 flags;
4627 
4628 	if (!pf->wol_ena)
4629 		return;
4630 
4631 	vsi = ice_get_main_vsi(pf);
4632 	if (!vsi)
4633 		return;
4634 
4635 	/* Get current MAC address in case it's an LAA */
4636 	if (vsi->netdev)
4637 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4638 	else
4639 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4640 
4641 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4642 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4643 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4644 
4645 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4646 	if (status)
4647 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4648 			ice_stat_str(status),
4649 			ice_aq_str(hw->adminq.sq_last_status));
4650 }
4651 
4652 /**
4653  * ice_remove - Device removal routine
4654  * @pdev: PCI device information struct
4655  */
4656 static void ice_remove(struct pci_dev *pdev)
4657 {
4658 	struct ice_pf *pf = pci_get_drvdata(pdev);
4659 	int i;
4660 
4661 	ice_devlink_unregister(pf);
4662 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4663 		if (!ice_is_reset_in_progress(pf->state))
4664 			break;
4665 		msleep(100);
4666 	}
4667 
4668 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4669 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4670 		ice_free_vfs(pf);
4671 	}
4672 
4673 	ice_service_task_stop(pf);
4674 
4675 	ice_aq_cancel_waiting_tasks(pf);
4676 	ice_unplug_aux_dev(pf);
4677 	ida_free(&ice_aux_ida, pf->aux_idx);
4678 	set_bit(ICE_DOWN, pf->state);
4679 
4680 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
4681 	ice_deinit_lag(pf);
4682 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4683 		ice_ptp_release(pf);
4684 	if (!ice_is_safe_mode(pf))
4685 		ice_remove_arfs(pf);
4686 	ice_setup_mc_magic_wake(pf);
4687 	ice_vsi_release_all(pf);
4688 	ice_set_wake(pf);
4689 	ice_free_irq_msix_misc(pf);
4690 	ice_for_each_vsi(pf, i) {
4691 		if (!pf->vsi[i])
4692 			continue;
4693 		ice_vsi_free_q_vectors(pf->vsi[i]);
4694 	}
4695 	ice_deinit_pf(pf);
4696 	ice_devlink_destroy_regions(pf);
4697 	ice_deinit_hw(&pf->hw);
4698 
4699 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4700 	 * do it via ice_schedule_reset() since there is no need to rebuild
4701 	 * and the service task is already stopped.
4702 	 */
4703 	ice_reset(&pf->hw, ICE_RESET_PFR);
4704 	pci_wait_for_pending_transaction(pdev);
4705 	ice_clear_interrupt_scheme(pf);
4706 	pci_disable_pcie_error_reporting(pdev);
4707 	pci_disable_device(pdev);
4708 }
4709 
4710 /**
4711  * ice_shutdown - PCI callback for shutting down device
4712  * @pdev: PCI device information struct
4713  */
4714 static void ice_shutdown(struct pci_dev *pdev)
4715 {
4716 	struct ice_pf *pf = pci_get_drvdata(pdev);
4717 
4718 	ice_remove(pdev);
4719 
4720 	if (system_state == SYSTEM_POWER_OFF) {
4721 		pci_wake_from_d3(pdev, pf->wol_ena);
4722 		pci_set_power_state(pdev, PCI_D3hot);
4723 	}
4724 }
4725 
4726 #ifdef CONFIG_PM
4727 /**
4728  * ice_prepare_for_shutdown - prep for PCI shutdown
4729  * @pf: board private structure
4730  *
4731  * Inform or close all dependent features in prep for PCI device shutdown
4732  */
4733 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4734 {
4735 	struct ice_hw *hw = &pf->hw;
4736 	u32 v;
4737 
4738 	/* Notify VFs of impending reset */
4739 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4740 		ice_vc_notify_reset(pf);
4741 
4742 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4743 
4744 	/* disable the VSIs and their queues that are not already DOWN */
4745 	ice_pf_dis_all_vsi(pf, false);
4746 
4747 	ice_for_each_vsi(pf, v)
4748 		if (pf->vsi[v])
4749 			pf->vsi[v]->vsi_num = 0;
4750 
4751 	ice_shutdown_all_ctrlq(hw);
4752 }
4753 
4754 /**
4755  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4756  * @pf: board private structure to reinitialize
4757  *
4758  * This routine reinitialize interrupt scheme that was cleared during
4759  * power management suspend callback.
4760  *
4761  * This should be called during resume routine to re-allocate the q_vectors
4762  * and reacquire interrupts.
4763  */
4764 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4765 {
4766 	struct device *dev = ice_pf_to_dev(pf);
4767 	int ret, v;
4768 
4769 	/* Since we clear MSIX flag during suspend, we need to
4770 	 * set it back during resume...
4771 	 */
4772 
4773 	ret = ice_init_interrupt_scheme(pf);
4774 	if (ret) {
4775 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4776 		return ret;
4777 	}
4778 
4779 	/* Remap vectors and rings, after successful re-init interrupts */
4780 	ice_for_each_vsi(pf, v) {
4781 		if (!pf->vsi[v])
4782 			continue;
4783 
4784 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4785 		if (ret)
4786 			goto err_reinit;
4787 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4788 	}
4789 
4790 	ret = ice_req_irq_msix_misc(pf);
4791 	if (ret) {
4792 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4793 			ret);
4794 		goto err_reinit;
4795 	}
4796 
4797 	return 0;
4798 
4799 err_reinit:
4800 	while (v--)
4801 		if (pf->vsi[v])
4802 			ice_vsi_free_q_vectors(pf->vsi[v]);
4803 
4804 	return ret;
4805 }
4806 
4807 /**
4808  * ice_suspend
4809  * @dev: generic device information structure
4810  *
4811  * Power Management callback to quiesce the device and prepare
4812  * for D3 transition.
4813  */
4814 static int __maybe_unused ice_suspend(struct device *dev)
4815 {
4816 	struct pci_dev *pdev = to_pci_dev(dev);
4817 	struct ice_pf *pf;
4818 	int disabled, v;
4819 
4820 	pf = pci_get_drvdata(pdev);
4821 
4822 	if (!ice_pf_state_is_nominal(pf)) {
4823 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4824 		return -EBUSY;
4825 	}
4826 
4827 	/* Stop watchdog tasks until resume completion.
4828 	 * Even though it is most likely that the service task is
4829 	 * disabled if the device is suspended or down, the service task's
4830 	 * state is controlled by a different state bit, and we should
4831 	 * store and honor whatever state that bit is in at this point.
4832 	 */
4833 	disabled = ice_service_task_stop(pf);
4834 
4835 	ice_unplug_aux_dev(pf);
4836 
4837 	/* Already suspended?, then there is nothing to do */
4838 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4839 		if (!disabled)
4840 			ice_service_task_restart(pf);
4841 		return 0;
4842 	}
4843 
4844 	if (test_bit(ICE_DOWN, pf->state) ||
4845 	    ice_is_reset_in_progress(pf->state)) {
4846 		dev_err(dev, "can't suspend device in reset or already down\n");
4847 		if (!disabled)
4848 			ice_service_task_restart(pf);
4849 		return 0;
4850 	}
4851 
4852 	ice_setup_mc_magic_wake(pf);
4853 
4854 	ice_prepare_for_shutdown(pf);
4855 
4856 	ice_set_wake(pf);
4857 
4858 	/* Free vectors, clear the interrupt scheme and release IRQs
4859 	 * for proper hibernation, especially with large number of CPUs.
4860 	 * Otherwise hibernation might fail when mapping all the vectors back
4861 	 * to CPU0.
4862 	 */
4863 	ice_free_irq_msix_misc(pf);
4864 	ice_for_each_vsi(pf, v) {
4865 		if (!pf->vsi[v])
4866 			continue;
4867 		ice_vsi_free_q_vectors(pf->vsi[v]);
4868 	}
4869 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4870 	ice_clear_interrupt_scheme(pf);
4871 
4872 	pci_save_state(pdev);
4873 	pci_wake_from_d3(pdev, pf->wol_ena);
4874 	pci_set_power_state(pdev, PCI_D3hot);
4875 	return 0;
4876 }
4877 
4878 /**
4879  * ice_resume - PM callback for waking up from D3
4880  * @dev: generic device information structure
4881  */
4882 static int __maybe_unused ice_resume(struct device *dev)
4883 {
4884 	struct pci_dev *pdev = to_pci_dev(dev);
4885 	enum ice_reset_req reset_type;
4886 	struct ice_pf *pf;
4887 	struct ice_hw *hw;
4888 	int ret;
4889 
4890 	pci_set_power_state(pdev, PCI_D0);
4891 	pci_restore_state(pdev);
4892 	pci_save_state(pdev);
4893 
4894 	if (!pci_device_is_present(pdev))
4895 		return -ENODEV;
4896 
4897 	ret = pci_enable_device_mem(pdev);
4898 	if (ret) {
4899 		dev_err(dev, "Cannot enable device after suspend\n");
4900 		return ret;
4901 	}
4902 
4903 	pf = pci_get_drvdata(pdev);
4904 	hw = &pf->hw;
4905 
4906 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4907 	ice_print_wake_reason(pf);
4908 
4909 	/* We cleared the interrupt scheme when we suspended, so we need to
4910 	 * restore it now to resume device functionality.
4911 	 */
4912 	ret = ice_reinit_interrupt_scheme(pf);
4913 	if (ret)
4914 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4915 
4916 	clear_bit(ICE_DOWN, pf->state);
4917 	/* Now perform PF reset and rebuild */
4918 	reset_type = ICE_RESET_PFR;
4919 	/* re-enable service task for reset, but allow reset to schedule it */
4920 	clear_bit(ICE_SERVICE_DIS, pf->state);
4921 
4922 	if (ice_schedule_reset(pf, reset_type))
4923 		dev_err(dev, "Reset during resume failed.\n");
4924 
4925 	clear_bit(ICE_SUSPENDED, pf->state);
4926 	ice_service_task_restart(pf);
4927 
4928 	/* Restart the service task */
4929 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4930 
4931 	return 0;
4932 }
4933 #endif /* CONFIG_PM */
4934 
4935 /**
4936  * ice_pci_err_detected - warning that PCI error has been detected
4937  * @pdev: PCI device information struct
4938  * @err: the type of PCI error
4939  *
4940  * Called to warn that something happened on the PCI bus and the error handling
4941  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4942  */
4943 static pci_ers_result_t
4944 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4945 {
4946 	struct ice_pf *pf = pci_get_drvdata(pdev);
4947 
4948 	if (!pf) {
4949 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4950 			__func__, err);
4951 		return PCI_ERS_RESULT_DISCONNECT;
4952 	}
4953 
4954 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4955 		ice_service_task_stop(pf);
4956 
4957 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4958 			set_bit(ICE_PFR_REQ, pf->state);
4959 			ice_prepare_for_reset(pf);
4960 		}
4961 	}
4962 
4963 	return PCI_ERS_RESULT_NEED_RESET;
4964 }
4965 
4966 /**
4967  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4968  * @pdev: PCI device information struct
4969  *
4970  * Called to determine if the driver can recover from the PCI slot reset by
4971  * using a register read to determine if the device is recoverable.
4972  */
4973 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4974 {
4975 	struct ice_pf *pf = pci_get_drvdata(pdev);
4976 	pci_ers_result_t result;
4977 	int err;
4978 	u32 reg;
4979 
4980 	err = pci_enable_device_mem(pdev);
4981 	if (err) {
4982 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4983 			err);
4984 		result = PCI_ERS_RESULT_DISCONNECT;
4985 	} else {
4986 		pci_set_master(pdev);
4987 		pci_restore_state(pdev);
4988 		pci_save_state(pdev);
4989 		pci_wake_from_d3(pdev, false);
4990 
4991 		/* Check for life */
4992 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4993 		if (!reg)
4994 			result = PCI_ERS_RESULT_RECOVERED;
4995 		else
4996 			result = PCI_ERS_RESULT_DISCONNECT;
4997 	}
4998 
4999 	err = pci_aer_clear_nonfatal_status(pdev);
5000 	if (err)
5001 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
5002 			err);
5003 		/* non-fatal, continue */
5004 
5005 	return result;
5006 }
5007 
5008 /**
5009  * ice_pci_err_resume - restart operations after PCI error recovery
5010  * @pdev: PCI device information struct
5011  *
5012  * Called to allow the driver to bring things back up after PCI error and/or
5013  * reset recovery have finished
5014  */
5015 static void ice_pci_err_resume(struct pci_dev *pdev)
5016 {
5017 	struct ice_pf *pf = pci_get_drvdata(pdev);
5018 
5019 	if (!pf) {
5020 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5021 			__func__);
5022 		return;
5023 	}
5024 
5025 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5026 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5027 			__func__);
5028 		return;
5029 	}
5030 
5031 	ice_restore_all_vfs_msi_state(pdev);
5032 
5033 	ice_do_reset(pf, ICE_RESET_PFR);
5034 	ice_service_task_restart(pf);
5035 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5036 }
5037 
5038 /**
5039  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5040  * @pdev: PCI device information struct
5041  */
5042 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5043 {
5044 	struct ice_pf *pf = pci_get_drvdata(pdev);
5045 
5046 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5047 		ice_service_task_stop(pf);
5048 
5049 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5050 			set_bit(ICE_PFR_REQ, pf->state);
5051 			ice_prepare_for_reset(pf);
5052 		}
5053 	}
5054 }
5055 
5056 /**
5057  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5058  * @pdev: PCI device information struct
5059  */
5060 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5061 {
5062 	ice_pci_err_resume(pdev);
5063 }
5064 
5065 /* ice_pci_tbl - PCI Device ID Table
5066  *
5067  * Wildcard entries (PCI_ANY_ID) should come last
5068  * Last entry must be all 0s
5069  *
5070  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5071  *   Class, Class Mask, private data (not used) }
5072  */
5073 static const struct pci_device_id ice_pci_tbl[] = {
5074 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5075 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5076 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5077 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5078 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5079 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5080 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5081 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5082 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5083 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5084 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5085 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5086 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5087 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5088 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5089 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5090 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5091 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5092 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5093 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5094 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5095 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5096 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5097 	/* required last entry */
5098 	{ 0, }
5099 };
5100 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5101 
5102 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5103 
5104 static const struct pci_error_handlers ice_pci_err_handler = {
5105 	.error_detected = ice_pci_err_detected,
5106 	.slot_reset = ice_pci_err_slot_reset,
5107 	.reset_prepare = ice_pci_err_reset_prepare,
5108 	.reset_done = ice_pci_err_reset_done,
5109 	.resume = ice_pci_err_resume
5110 };
5111 
5112 static struct pci_driver ice_driver = {
5113 	.name = KBUILD_MODNAME,
5114 	.id_table = ice_pci_tbl,
5115 	.probe = ice_probe,
5116 	.remove = ice_remove,
5117 #ifdef CONFIG_PM
5118 	.driver.pm = &ice_pm_ops,
5119 #endif /* CONFIG_PM */
5120 	.shutdown = ice_shutdown,
5121 	.sriov_configure = ice_sriov_configure,
5122 	.err_handler = &ice_pci_err_handler
5123 };
5124 
5125 /**
5126  * ice_module_init - Driver registration routine
5127  *
5128  * ice_module_init is the first routine called when the driver is
5129  * loaded. All it does is register with the PCI subsystem.
5130  */
5131 static int __init ice_module_init(void)
5132 {
5133 	int status;
5134 
5135 	pr_info("%s\n", ice_driver_string);
5136 	pr_info("%s\n", ice_copyright);
5137 
5138 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5139 	if (!ice_wq) {
5140 		pr_err("Failed to create workqueue\n");
5141 		return -ENOMEM;
5142 	}
5143 
5144 	status = pci_register_driver(&ice_driver);
5145 	if (status) {
5146 		pr_err("failed to register PCI driver, err %d\n", status);
5147 		destroy_workqueue(ice_wq);
5148 	}
5149 
5150 	return status;
5151 }
5152 module_init(ice_module_init);
5153 
5154 /**
5155  * ice_module_exit - Driver exit cleanup routine
5156  *
5157  * ice_module_exit is called just before the driver is removed
5158  * from memory.
5159  */
5160 static void __exit ice_module_exit(void)
5161 {
5162 	pci_unregister_driver(&ice_driver);
5163 	destroy_workqueue(ice_wq);
5164 	pr_info("module unloaded\n");
5165 }
5166 module_exit(ice_module_exit);
5167 
5168 /**
5169  * ice_set_mac_address - NDO callback to set MAC address
5170  * @netdev: network interface device structure
5171  * @pi: pointer to an address structure
5172  *
5173  * Returns 0 on success, negative on failure
5174  */
5175 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5176 {
5177 	struct ice_netdev_priv *np = netdev_priv(netdev);
5178 	struct ice_vsi *vsi = np->vsi;
5179 	struct ice_pf *pf = vsi->back;
5180 	struct ice_hw *hw = &pf->hw;
5181 	struct sockaddr *addr = pi;
5182 	enum ice_status status;
5183 	u8 old_mac[ETH_ALEN];
5184 	u8 flags = 0;
5185 	int err = 0;
5186 	u8 *mac;
5187 
5188 	mac = (u8 *)addr->sa_data;
5189 
5190 	if (!is_valid_ether_addr(mac))
5191 		return -EADDRNOTAVAIL;
5192 
5193 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5194 		netdev_dbg(netdev, "already using mac %pM\n", mac);
5195 		return 0;
5196 	}
5197 
5198 	if (test_bit(ICE_DOWN, pf->state) ||
5199 	    ice_is_reset_in_progress(pf->state)) {
5200 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5201 			   mac);
5202 		return -EBUSY;
5203 	}
5204 
5205 	netif_addr_lock_bh(netdev);
5206 	ether_addr_copy(old_mac, netdev->dev_addr);
5207 	/* change the netdev's MAC address */
5208 	eth_hw_addr_set(netdev, mac);
5209 	netif_addr_unlock_bh(netdev);
5210 
5211 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5212 	status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5213 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5214 		err = -EADDRNOTAVAIL;
5215 		goto err_update_filters;
5216 	}
5217 
5218 	/* Add filter for new MAC. If filter exists, return success */
5219 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5220 	if (status == ICE_ERR_ALREADY_EXISTS)
5221 		/* Although this MAC filter is already present in hardware it's
5222 		 * possible in some cases (e.g. bonding) that dev_addr was
5223 		 * modified outside of the driver and needs to be restored back
5224 		 * to this value.
5225 		 */
5226 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5227 	else if (status)
5228 		/* error if the new filter addition failed */
5229 		err = -EADDRNOTAVAIL;
5230 
5231 err_update_filters:
5232 	if (err) {
5233 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5234 			   mac);
5235 		netif_addr_lock_bh(netdev);
5236 		eth_hw_addr_set(netdev, old_mac);
5237 		netif_addr_unlock_bh(netdev);
5238 		return err;
5239 	}
5240 
5241 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5242 		   netdev->dev_addr);
5243 
5244 	/* write new MAC address to the firmware */
5245 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5246 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5247 	if (status) {
5248 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
5249 			   mac, ice_stat_str(status));
5250 	}
5251 	return 0;
5252 }
5253 
5254 /**
5255  * ice_set_rx_mode - NDO callback to set the netdev filters
5256  * @netdev: network interface device structure
5257  */
5258 static void ice_set_rx_mode(struct net_device *netdev)
5259 {
5260 	struct ice_netdev_priv *np = netdev_priv(netdev);
5261 	struct ice_vsi *vsi = np->vsi;
5262 
5263 	if (!vsi)
5264 		return;
5265 
5266 	/* Set the flags to synchronize filters
5267 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5268 	 * flags
5269 	 */
5270 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5271 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5272 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5273 
5274 	/* schedule our worker thread which will take care of
5275 	 * applying the new filter changes
5276 	 */
5277 	ice_service_task_schedule(vsi->back);
5278 }
5279 
5280 /**
5281  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5282  * @netdev: network interface device structure
5283  * @queue_index: Queue ID
5284  * @maxrate: maximum bandwidth in Mbps
5285  */
5286 static int
5287 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5288 {
5289 	struct ice_netdev_priv *np = netdev_priv(netdev);
5290 	struct ice_vsi *vsi = np->vsi;
5291 	enum ice_status status;
5292 	u16 q_handle;
5293 	u8 tc;
5294 
5295 	/* Validate maxrate requested is within permitted range */
5296 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5297 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5298 			   maxrate, queue_index);
5299 		return -EINVAL;
5300 	}
5301 
5302 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5303 	tc = ice_dcb_get_tc(vsi, queue_index);
5304 
5305 	/* Set BW back to default, when user set maxrate to 0 */
5306 	if (!maxrate)
5307 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5308 					       q_handle, ICE_MAX_BW);
5309 	else
5310 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5311 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5312 	if (status) {
5313 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5314 			   ice_stat_str(status));
5315 		return -EIO;
5316 	}
5317 
5318 	return 0;
5319 }
5320 
5321 /**
5322  * ice_fdb_add - add an entry to the hardware database
5323  * @ndm: the input from the stack
5324  * @tb: pointer to array of nladdr (unused)
5325  * @dev: the net device pointer
5326  * @addr: the MAC address entry being added
5327  * @vid: VLAN ID
5328  * @flags: instructions from stack about fdb operation
5329  * @extack: netlink extended ack
5330  */
5331 static int
5332 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5333 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5334 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5335 {
5336 	int err;
5337 
5338 	if (vid) {
5339 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5340 		return -EINVAL;
5341 	}
5342 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5343 		netdev_err(dev, "FDB only supports static addresses\n");
5344 		return -EINVAL;
5345 	}
5346 
5347 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5348 		err = dev_uc_add_excl(dev, addr);
5349 	else if (is_multicast_ether_addr(addr))
5350 		err = dev_mc_add_excl(dev, addr);
5351 	else
5352 		err = -EINVAL;
5353 
5354 	/* Only return duplicate errors if NLM_F_EXCL is set */
5355 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5356 		err = 0;
5357 
5358 	return err;
5359 }
5360 
5361 /**
5362  * ice_fdb_del - delete an entry from the hardware database
5363  * @ndm: the input from the stack
5364  * @tb: pointer to array of nladdr (unused)
5365  * @dev: the net device pointer
5366  * @addr: the MAC address entry being added
5367  * @vid: VLAN ID
5368  */
5369 static int
5370 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5371 	    struct net_device *dev, const unsigned char *addr,
5372 	    __always_unused u16 vid)
5373 {
5374 	int err;
5375 
5376 	if (ndm->ndm_state & NUD_PERMANENT) {
5377 		netdev_err(dev, "FDB only supports static addresses\n");
5378 		return -EINVAL;
5379 	}
5380 
5381 	if (is_unicast_ether_addr(addr))
5382 		err = dev_uc_del(dev, addr);
5383 	else if (is_multicast_ether_addr(addr))
5384 		err = dev_mc_del(dev, addr);
5385 	else
5386 		err = -EINVAL;
5387 
5388 	return err;
5389 }
5390 
5391 /**
5392  * ice_set_features - set the netdev feature flags
5393  * @netdev: ptr to the netdev being adjusted
5394  * @features: the feature set that the stack is suggesting
5395  */
5396 static int
5397 ice_set_features(struct net_device *netdev, netdev_features_t features)
5398 {
5399 	struct ice_netdev_priv *np = netdev_priv(netdev);
5400 	struct ice_vsi *vsi = np->vsi;
5401 	struct ice_pf *pf = vsi->back;
5402 	int ret = 0;
5403 
5404 	/* Don't set any netdev advanced features with device in Safe Mode */
5405 	if (ice_is_safe_mode(vsi->back)) {
5406 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5407 		return ret;
5408 	}
5409 
5410 	/* Do not change setting during reset */
5411 	if (ice_is_reset_in_progress(pf->state)) {
5412 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5413 		return -EBUSY;
5414 	}
5415 
5416 	/* Multiple features can be changed in one call so keep features in
5417 	 * separate if/else statements to guarantee each feature is checked
5418 	 */
5419 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5420 		ice_vsi_manage_rss_lut(vsi, true);
5421 	else if (!(features & NETIF_F_RXHASH) &&
5422 		 netdev->features & NETIF_F_RXHASH)
5423 		ice_vsi_manage_rss_lut(vsi, false);
5424 
5425 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5426 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5427 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5428 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5429 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5430 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5431 
5432 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5433 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5434 		ret = ice_vsi_manage_vlan_insertion(vsi);
5435 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5436 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5437 		ret = ice_vsi_manage_vlan_insertion(vsi);
5438 
5439 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5440 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5441 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5442 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5443 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5444 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5445 
5446 	if ((features & NETIF_F_NTUPLE) &&
5447 	    !(netdev->features & NETIF_F_NTUPLE)) {
5448 		ice_vsi_manage_fdir(vsi, true);
5449 		ice_init_arfs(vsi);
5450 	} else if (!(features & NETIF_F_NTUPLE) &&
5451 		 (netdev->features & NETIF_F_NTUPLE)) {
5452 		ice_vsi_manage_fdir(vsi, false);
5453 		ice_clear_arfs(vsi);
5454 	}
5455 
5456 	return ret;
5457 }
5458 
5459 /**
5460  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5461  * @vsi: VSI to setup VLAN properties for
5462  */
5463 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5464 {
5465 	int ret = 0;
5466 
5467 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5468 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5469 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5470 		ret = ice_vsi_manage_vlan_insertion(vsi);
5471 
5472 	return ret;
5473 }
5474 
5475 /**
5476  * ice_vsi_cfg - Setup the VSI
5477  * @vsi: the VSI being configured
5478  *
5479  * Return 0 on success and negative value on error
5480  */
5481 int ice_vsi_cfg(struct ice_vsi *vsi)
5482 {
5483 	int err;
5484 
5485 	if (vsi->netdev) {
5486 		ice_set_rx_mode(vsi->netdev);
5487 
5488 		err = ice_vsi_vlan_setup(vsi);
5489 
5490 		if (err)
5491 			return err;
5492 	}
5493 	ice_vsi_cfg_dcb_rings(vsi);
5494 
5495 	err = ice_vsi_cfg_lan_txqs(vsi);
5496 	if (!err && ice_is_xdp_ena_vsi(vsi))
5497 		err = ice_vsi_cfg_xdp_txqs(vsi);
5498 	if (!err)
5499 		err = ice_vsi_cfg_rxqs(vsi);
5500 
5501 	return err;
5502 }
5503 
5504 /* THEORY OF MODERATION:
5505  * The below code creates custom DIM profiles for use by this driver, because
5506  * the ice driver hardware works differently than the hardware that DIMLIB was
5507  * originally made for. ice hardware doesn't have packet count limits that
5508  * can trigger an interrupt, but it *does* have interrupt rate limit support,
5509  * and this code adds that capability to be used by the driver when it's using
5510  * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
5511  * for how to "respond" to traffic and interrupts, so this driver uses a
5512  * slightly different set of moderation parameters to get best performance.
5513  */
5514 struct ice_dim {
5515 	/* the throttle rate for interrupts, basically worst case delay before
5516 	 * an initial interrupt fires, value is stored in microseconds.
5517 	 */
5518 	u16 itr;
5519 	/* the rate limit for interrupts, which can cap a delay from a small
5520 	 * ITR at a certain amount of interrupts per second. f.e. a 2us ITR
5521 	 * could yield as much as 500,000 interrupts per second, but with a
5522 	 * 10us rate limit, it limits to 100,000 interrupts per second. Value
5523 	 * is stored in microseconds.
5524 	 */
5525 	u16 intrl;
5526 };
5527 
5528 /* Make a different profile for Rx that doesn't allow quite so aggressive
5529  * moderation at the high end (it maxes out at 128us or about 8k interrupts a
5530  * second. The INTRL/rate parameters here are only useful to cap small ITR
5531  * values, which is why for larger ITR's - like 128, which can only generate
5532  * 8k interrupts per second, there is no point to rate limit and the values
5533  * are set to zero. The rate limit values do affect latency, and so must
5534  * be reasonably small so to not impact latency sensitive tests.
5535  */
5536 static const struct ice_dim rx_profile[] = {
5537 	{2, 10},
5538 	{8, 16},
5539 	{32, 0},
5540 	{96, 0},
5541 	{128, 0}
5542 };
5543 
5544 /* The transmit profile, which has the same sorts of values
5545  * as the previous struct
5546  */
5547 static const struct ice_dim tx_profile[] = {
5548 	{2, 10},
5549 	{8, 16},
5550 	{64, 0},
5551 	{128, 0},
5552 	{256, 0}
5553 };
5554 
5555 static void ice_tx_dim_work(struct work_struct *work)
5556 {
5557 	struct ice_ring_container *rc;
5558 	struct ice_q_vector *q_vector;
5559 	struct dim *dim;
5560 	u16 itr, intrl;
5561 
5562 	dim = container_of(work, struct dim, work);
5563 	rc = container_of(dim, struct ice_ring_container, dim);
5564 	q_vector = container_of(rc, struct ice_q_vector, tx);
5565 
5566 	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5567 		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5568 
5569 	/* look up the values in our local table */
5570 	itr = tx_profile[dim->profile_ix].itr;
5571 	intrl = tx_profile[dim->profile_ix].intrl;
5572 
5573 	ice_trace(tx_dim_work, q_vector, dim);
5574 	ice_write_itr(rc, itr);
5575 	ice_write_intrl(q_vector, intrl);
5576 
5577 	dim->state = DIM_START_MEASURE;
5578 }
5579 
5580 static void ice_rx_dim_work(struct work_struct *work)
5581 {
5582 	struct ice_ring_container *rc;
5583 	struct ice_q_vector *q_vector;
5584 	struct dim *dim;
5585 	u16 itr, intrl;
5586 
5587 	dim = container_of(work, struct dim, work);
5588 	rc = container_of(dim, struct ice_ring_container, dim);
5589 	q_vector = container_of(rc, struct ice_q_vector, rx);
5590 
5591 	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5592 		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5593 
5594 	/* look up the values in our local table */
5595 	itr = rx_profile[dim->profile_ix].itr;
5596 	intrl = rx_profile[dim->profile_ix].intrl;
5597 
5598 	ice_trace(rx_dim_work, q_vector, dim);
5599 	ice_write_itr(rc, itr);
5600 	ice_write_intrl(q_vector, intrl);
5601 
5602 	dim->state = DIM_START_MEASURE;
5603 }
5604 
5605 /**
5606  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5607  * @vsi: the VSI being configured
5608  */
5609 static void ice_napi_enable_all(struct ice_vsi *vsi)
5610 {
5611 	int q_idx;
5612 
5613 	if (!vsi->netdev)
5614 		return;
5615 
5616 	ice_for_each_q_vector(vsi, q_idx) {
5617 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5618 
5619 		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5620 		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5621 
5622 		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5623 		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5624 
5625 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
5626 			napi_enable(&q_vector->napi);
5627 	}
5628 }
5629 
5630 /**
5631  * ice_up_complete - Finish the last steps of bringing up a connection
5632  * @vsi: The VSI being configured
5633  *
5634  * Return 0 on success and negative value on error
5635  */
5636 static int ice_up_complete(struct ice_vsi *vsi)
5637 {
5638 	struct ice_pf *pf = vsi->back;
5639 	int err;
5640 
5641 	ice_vsi_cfg_msix(vsi);
5642 
5643 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5644 	 * Tx queue group list was configured and the context bits were
5645 	 * programmed using ice_vsi_cfg_txqs
5646 	 */
5647 	err = ice_vsi_start_all_rx_rings(vsi);
5648 	if (err)
5649 		return err;
5650 
5651 	clear_bit(ICE_VSI_DOWN, vsi->state);
5652 	ice_napi_enable_all(vsi);
5653 	ice_vsi_ena_irq(vsi);
5654 
5655 	if (vsi->port_info &&
5656 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5657 	    vsi->netdev) {
5658 		ice_print_link_msg(vsi, true);
5659 		netif_tx_start_all_queues(vsi->netdev);
5660 		netif_carrier_on(vsi->netdev);
5661 	}
5662 
5663 	ice_service_task_schedule(pf);
5664 
5665 	return 0;
5666 }
5667 
5668 /**
5669  * ice_up - Bring the connection back up after being down
5670  * @vsi: VSI being configured
5671  */
5672 int ice_up(struct ice_vsi *vsi)
5673 {
5674 	int err;
5675 
5676 	err = ice_vsi_cfg(vsi);
5677 	if (!err)
5678 		err = ice_up_complete(vsi);
5679 
5680 	return err;
5681 }
5682 
5683 /**
5684  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5685  * @syncp: pointer to u64_stats_sync
5686  * @stats: stats that pkts and bytes count will be taken from
5687  * @pkts: packets stats counter
5688  * @bytes: bytes stats counter
5689  *
5690  * This function fetches stats from the ring considering the atomic operations
5691  * that needs to be performed to read u64 values in 32 bit machine.
5692  */
5693 static void
5694 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
5695 			     u64 *pkts, u64 *bytes)
5696 {
5697 	unsigned int start;
5698 
5699 	do {
5700 		start = u64_stats_fetch_begin_irq(syncp);
5701 		*pkts = stats.pkts;
5702 		*bytes = stats.bytes;
5703 	} while (u64_stats_fetch_retry_irq(syncp, start));
5704 }
5705 
5706 /**
5707  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5708  * @vsi: the VSI to be updated
5709  * @rings: rings to work on
5710  * @count: number of rings
5711  */
5712 static void
5713 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
5714 			     u16 count)
5715 {
5716 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5717 	u16 i;
5718 
5719 	for (i = 0; i < count; i++) {
5720 		struct ice_tx_ring *ring;
5721 		u64 pkts = 0, bytes = 0;
5722 
5723 		ring = READ_ONCE(rings[i]);
5724 		if (ring)
5725 			ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
5726 		vsi_stats->tx_packets += pkts;
5727 		vsi_stats->tx_bytes += bytes;
5728 		vsi->tx_restart += ring->tx_stats.restart_q;
5729 		vsi->tx_busy += ring->tx_stats.tx_busy;
5730 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5731 	}
5732 }
5733 
5734 /**
5735  * ice_update_vsi_ring_stats - Update VSI stats counters
5736  * @vsi: the VSI to be updated
5737  */
5738 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5739 {
5740 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5741 	u64 pkts, bytes;
5742 	int i;
5743 
5744 	/* reset netdev stats */
5745 	vsi_stats->tx_packets = 0;
5746 	vsi_stats->tx_bytes = 0;
5747 	vsi_stats->rx_packets = 0;
5748 	vsi_stats->rx_bytes = 0;
5749 
5750 	/* reset non-netdev (extended) stats */
5751 	vsi->tx_restart = 0;
5752 	vsi->tx_busy = 0;
5753 	vsi->tx_linearize = 0;
5754 	vsi->rx_buf_failed = 0;
5755 	vsi->rx_page_failed = 0;
5756 
5757 	rcu_read_lock();
5758 
5759 	/* update Tx rings counters */
5760 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5761 
5762 	/* update Rx rings counters */
5763 	ice_for_each_rxq(vsi, i) {
5764 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
5765 
5766 		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
5767 		vsi_stats->rx_packets += pkts;
5768 		vsi_stats->rx_bytes += bytes;
5769 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5770 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5771 	}
5772 
5773 	/* update XDP Tx rings counters */
5774 	if (ice_is_xdp_ena_vsi(vsi))
5775 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5776 					     vsi->num_xdp_txq);
5777 
5778 	rcu_read_unlock();
5779 }
5780 
5781 /**
5782  * ice_update_vsi_stats - Update VSI stats counters
5783  * @vsi: the VSI to be updated
5784  */
5785 void ice_update_vsi_stats(struct ice_vsi *vsi)
5786 {
5787 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5788 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5789 	struct ice_pf *pf = vsi->back;
5790 
5791 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5792 	    test_bit(ICE_CFG_BUSY, pf->state))
5793 		return;
5794 
5795 	/* get stats as recorded by Tx/Rx rings */
5796 	ice_update_vsi_ring_stats(vsi);
5797 
5798 	/* get VSI stats as recorded by the hardware */
5799 	ice_update_eth_stats(vsi);
5800 
5801 	cur_ns->tx_errors = cur_es->tx_errors;
5802 	cur_ns->rx_dropped = cur_es->rx_discards;
5803 	cur_ns->tx_dropped = cur_es->tx_discards;
5804 	cur_ns->multicast = cur_es->rx_multicast;
5805 
5806 	/* update some more netdev stats if this is main VSI */
5807 	if (vsi->type == ICE_VSI_PF) {
5808 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5809 		cur_ns->rx_errors = pf->stats.crc_errors +
5810 				    pf->stats.illegal_bytes +
5811 				    pf->stats.rx_len_errors +
5812 				    pf->stats.rx_undersize +
5813 				    pf->hw_csum_rx_error +
5814 				    pf->stats.rx_jabber +
5815 				    pf->stats.rx_fragments +
5816 				    pf->stats.rx_oversize;
5817 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5818 		/* record drops from the port level */
5819 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5820 	}
5821 }
5822 
5823 /**
5824  * ice_update_pf_stats - Update PF port stats counters
5825  * @pf: PF whose stats needs to be updated
5826  */
5827 void ice_update_pf_stats(struct ice_pf *pf)
5828 {
5829 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5830 	struct ice_hw *hw = &pf->hw;
5831 	u16 fd_ctr_base;
5832 	u8 port;
5833 
5834 	port = hw->port_info->lport;
5835 	prev_ps = &pf->stats_prev;
5836 	cur_ps = &pf->stats;
5837 
5838 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5839 			  &prev_ps->eth.rx_bytes,
5840 			  &cur_ps->eth.rx_bytes);
5841 
5842 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5843 			  &prev_ps->eth.rx_unicast,
5844 			  &cur_ps->eth.rx_unicast);
5845 
5846 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5847 			  &prev_ps->eth.rx_multicast,
5848 			  &cur_ps->eth.rx_multicast);
5849 
5850 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5851 			  &prev_ps->eth.rx_broadcast,
5852 			  &cur_ps->eth.rx_broadcast);
5853 
5854 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5855 			  &prev_ps->eth.rx_discards,
5856 			  &cur_ps->eth.rx_discards);
5857 
5858 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5859 			  &prev_ps->eth.tx_bytes,
5860 			  &cur_ps->eth.tx_bytes);
5861 
5862 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5863 			  &prev_ps->eth.tx_unicast,
5864 			  &cur_ps->eth.tx_unicast);
5865 
5866 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5867 			  &prev_ps->eth.tx_multicast,
5868 			  &cur_ps->eth.tx_multicast);
5869 
5870 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5871 			  &prev_ps->eth.tx_broadcast,
5872 			  &cur_ps->eth.tx_broadcast);
5873 
5874 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5875 			  &prev_ps->tx_dropped_link_down,
5876 			  &cur_ps->tx_dropped_link_down);
5877 
5878 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5879 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5880 
5881 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5882 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5883 
5884 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5885 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5886 
5887 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5888 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5889 
5890 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5891 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5892 
5893 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5894 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5895 
5896 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5897 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5898 
5899 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5900 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5901 
5902 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5903 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5904 
5905 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5906 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5907 
5908 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5909 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5910 
5911 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5912 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5913 
5914 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5915 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5916 
5917 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5918 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5919 
5920 	fd_ctr_base = hw->fd_ctr_base;
5921 
5922 	ice_stat_update40(hw,
5923 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5924 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5925 			  &cur_ps->fd_sb_match);
5926 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5927 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5928 
5929 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5930 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5931 
5932 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5933 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5934 
5935 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5936 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5937 
5938 	ice_update_dcb_stats(pf);
5939 
5940 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5941 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5942 
5943 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5944 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5945 
5946 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5947 			  &prev_ps->mac_local_faults,
5948 			  &cur_ps->mac_local_faults);
5949 
5950 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5951 			  &prev_ps->mac_remote_faults,
5952 			  &cur_ps->mac_remote_faults);
5953 
5954 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5955 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5956 
5957 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5958 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5959 
5960 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5961 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5962 
5963 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5964 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5965 
5966 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5967 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5968 
5969 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5970 
5971 	pf->stat_prev_loaded = true;
5972 }
5973 
5974 /**
5975  * ice_get_stats64 - get statistics for network device structure
5976  * @netdev: network interface device structure
5977  * @stats: main device statistics structure
5978  */
5979 static
5980 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5981 {
5982 	struct ice_netdev_priv *np = netdev_priv(netdev);
5983 	struct rtnl_link_stats64 *vsi_stats;
5984 	struct ice_vsi *vsi = np->vsi;
5985 
5986 	vsi_stats = &vsi->net_stats;
5987 
5988 	if (!vsi->num_txq || !vsi->num_rxq)
5989 		return;
5990 
5991 	/* netdev packet/byte stats come from ring counter. These are obtained
5992 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
5993 	 * But, only call the update routine and read the registers if VSI is
5994 	 * not down.
5995 	 */
5996 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
5997 		ice_update_vsi_ring_stats(vsi);
5998 	stats->tx_packets = vsi_stats->tx_packets;
5999 	stats->tx_bytes = vsi_stats->tx_bytes;
6000 	stats->rx_packets = vsi_stats->rx_packets;
6001 	stats->rx_bytes = vsi_stats->rx_bytes;
6002 
6003 	/* The rest of the stats can be read from the hardware but instead we
6004 	 * just return values that the watchdog task has already obtained from
6005 	 * the hardware.
6006 	 */
6007 	stats->multicast = vsi_stats->multicast;
6008 	stats->tx_errors = vsi_stats->tx_errors;
6009 	stats->tx_dropped = vsi_stats->tx_dropped;
6010 	stats->rx_errors = vsi_stats->rx_errors;
6011 	stats->rx_dropped = vsi_stats->rx_dropped;
6012 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6013 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6014 }
6015 
6016 /**
6017  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6018  * @vsi: VSI having NAPI disabled
6019  */
6020 static void ice_napi_disable_all(struct ice_vsi *vsi)
6021 {
6022 	int q_idx;
6023 
6024 	if (!vsi->netdev)
6025 		return;
6026 
6027 	ice_for_each_q_vector(vsi, q_idx) {
6028 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6029 
6030 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6031 			napi_disable(&q_vector->napi);
6032 
6033 		cancel_work_sync(&q_vector->tx.dim.work);
6034 		cancel_work_sync(&q_vector->rx.dim.work);
6035 	}
6036 }
6037 
6038 /**
6039  * ice_down - Shutdown the connection
6040  * @vsi: The VSI being stopped
6041  */
6042 int ice_down(struct ice_vsi *vsi)
6043 {
6044 	int i, tx_err, rx_err, link_err = 0;
6045 
6046 	/* Caller of this function is expected to set the
6047 	 * vsi->state ICE_DOWN bit
6048 	 */
6049 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6050 		netif_carrier_off(vsi->netdev);
6051 		netif_tx_disable(vsi->netdev);
6052 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6053 		ice_eswitch_stop_all_tx_queues(vsi->back);
6054 	}
6055 
6056 	ice_vsi_dis_irq(vsi);
6057 
6058 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6059 	if (tx_err)
6060 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6061 			   vsi->vsi_num, tx_err);
6062 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6063 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6064 		if (tx_err)
6065 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6066 				   vsi->vsi_num, tx_err);
6067 	}
6068 
6069 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6070 	if (rx_err)
6071 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6072 			   vsi->vsi_num, rx_err);
6073 
6074 	ice_napi_disable_all(vsi);
6075 
6076 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6077 		link_err = ice_force_phys_link_state(vsi, false);
6078 		if (link_err)
6079 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6080 				   vsi->vsi_num, link_err);
6081 	}
6082 
6083 	ice_for_each_txq(vsi, i)
6084 		ice_clean_tx_ring(vsi->tx_rings[i]);
6085 
6086 	ice_for_each_rxq(vsi, i)
6087 		ice_clean_rx_ring(vsi->rx_rings[i]);
6088 
6089 	if (tx_err || rx_err || link_err) {
6090 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6091 			   vsi->vsi_num, vsi->vsw->sw_id);
6092 		return -EIO;
6093 	}
6094 
6095 	return 0;
6096 }
6097 
6098 /**
6099  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6100  * @vsi: VSI having resources allocated
6101  *
6102  * Return 0 on success, negative on failure
6103  */
6104 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6105 {
6106 	int i, err = 0;
6107 
6108 	if (!vsi->num_txq) {
6109 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6110 			vsi->vsi_num);
6111 		return -EINVAL;
6112 	}
6113 
6114 	ice_for_each_txq(vsi, i) {
6115 		struct ice_tx_ring *ring = vsi->tx_rings[i];
6116 
6117 		if (!ring)
6118 			return -EINVAL;
6119 
6120 		if (vsi->netdev)
6121 			ring->netdev = vsi->netdev;
6122 		err = ice_setup_tx_ring(ring);
6123 		if (err)
6124 			break;
6125 	}
6126 
6127 	return err;
6128 }
6129 
6130 /**
6131  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6132  * @vsi: VSI having resources allocated
6133  *
6134  * Return 0 on success, negative on failure
6135  */
6136 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6137 {
6138 	int i, err = 0;
6139 
6140 	if (!vsi->num_rxq) {
6141 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6142 			vsi->vsi_num);
6143 		return -EINVAL;
6144 	}
6145 
6146 	ice_for_each_rxq(vsi, i) {
6147 		struct ice_rx_ring *ring = vsi->rx_rings[i];
6148 
6149 		if (!ring)
6150 			return -EINVAL;
6151 
6152 		if (vsi->netdev)
6153 			ring->netdev = vsi->netdev;
6154 		err = ice_setup_rx_ring(ring);
6155 		if (err)
6156 			break;
6157 	}
6158 
6159 	return err;
6160 }
6161 
6162 /**
6163  * ice_vsi_open_ctrl - open control VSI for use
6164  * @vsi: the VSI to open
6165  *
6166  * Initialization of the Control VSI
6167  *
6168  * Returns 0 on success, negative value on error
6169  */
6170 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6171 {
6172 	char int_name[ICE_INT_NAME_STR_LEN];
6173 	struct ice_pf *pf = vsi->back;
6174 	struct device *dev;
6175 	int err;
6176 
6177 	dev = ice_pf_to_dev(pf);
6178 	/* allocate descriptors */
6179 	err = ice_vsi_setup_tx_rings(vsi);
6180 	if (err)
6181 		goto err_setup_tx;
6182 
6183 	err = ice_vsi_setup_rx_rings(vsi);
6184 	if (err)
6185 		goto err_setup_rx;
6186 
6187 	err = ice_vsi_cfg(vsi);
6188 	if (err)
6189 		goto err_setup_rx;
6190 
6191 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6192 		 dev_driver_string(dev), dev_name(dev));
6193 	err = ice_vsi_req_irq_msix(vsi, int_name);
6194 	if (err)
6195 		goto err_setup_rx;
6196 
6197 	ice_vsi_cfg_msix(vsi);
6198 
6199 	err = ice_vsi_start_all_rx_rings(vsi);
6200 	if (err)
6201 		goto err_up_complete;
6202 
6203 	clear_bit(ICE_VSI_DOWN, vsi->state);
6204 	ice_vsi_ena_irq(vsi);
6205 
6206 	return 0;
6207 
6208 err_up_complete:
6209 	ice_down(vsi);
6210 err_setup_rx:
6211 	ice_vsi_free_rx_rings(vsi);
6212 err_setup_tx:
6213 	ice_vsi_free_tx_rings(vsi);
6214 
6215 	return err;
6216 }
6217 
6218 /**
6219  * ice_vsi_open - Called when a network interface is made active
6220  * @vsi: the VSI to open
6221  *
6222  * Initialization of the VSI
6223  *
6224  * Returns 0 on success, negative value on error
6225  */
6226 int ice_vsi_open(struct ice_vsi *vsi)
6227 {
6228 	char int_name[ICE_INT_NAME_STR_LEN];
6229 	struct ice_pf *pf = vsi->back;
6230 	int err;
6231 
6232 	/* allocate descriptors */
6233 	err = ice_vsi_setup_tx_rings(vsi);
6234 	if (err)
6235 		goto err_setup_tx;
6236 
6237 	err = ice_vsi_setup_rx_rings(vsi);
6238 	if (err)
6239 		goto err_setup_rx;
6240 
6241 	err = ice_vsi_cfg(vsi);
6242 	if (err)
6243 		goto err_setup_rx;
6244 
6245 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6246 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6247 	err = ice_vsi_req_irq_msix(vsi, int_name);
6248 	if (err)
6249 		goto err_setup_rx;
6250 
6251 	if (vsi->type == ICE_VSI_PF) {
6252 		/* Notify the stack of the actual queue counts. */
6253 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6254 		if (err)
6255 			goto err_set_qs;
6256 
6257 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6258 		if (err)
6259 			goto err_set_qs;
6260 	}
6261 
6262 	err = ice_up_complete(vsi);
6263 	if (err)
6264 		goto err_up_complete;
6265 
6266 	return 0;
6267 
6268 err_up_complete:
6269 	ice_down(vsi);
6270 err_set_qs:
6271 	ice_vsi_free_irq(vsi);
6272 err_setup_rx:
6273 	ice_vsi_free_rx_rings(vsi);
6274 err_setup_tx:
6275 	ice_vsi_free_tx_rings(vsi);
6276 
6277 	return err;
6278 }
6279 
6280 /**
6281  * ice_vsi_release_all - Delete all VSIs
6282  * @pf: PF from which all VSIs are being removed
6283  */
6284 static void ice_vsi_release_all(struct ice_pf *pf)
6285 {
6286 	int err, i;
6287 
6288 	if (!pf->vsi)
6289 		return;
6290 
6291 	ice_for_each_vsi(pf, i) {
6292 		if (!pf->vsi[i])
6293 			continue;
6294 
6295 		err = ice_vsi_release(pf->vsi[i]);
6296 		if (err)
6297 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6298 				i, err, pf->vsi[i]->vsi_num);
6299 	}
6300 }
6301 
6302 /**
6303  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6304  * @pf: pointer to the PF instance
6305  * @type: VSI type to rebuild
6306  *
6307  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6308  */
6309 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6310 {
6311 	struct device *dev = ice_pf_to_dev(pf);
6312 	enum ice_status status;
6313 	int i, err;
6314 
6315 	ice_for_each_vsi(pf, i) {
6316 		struct ice_vsi *vsi = pf->vsi[i];
6317 
6318 		if (!vsi || vsi->type != type)
6319 			continue;
6320 
6321 		/* rebuild the VSI */
6322 		err = ice_vsi_rebuild(vsi, true);
6323 		if (err) {
6324 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6325 				err, vsi->idx, ice_vsi_type_str(type));
6326 			return err;
6327 		}
6328 
6329 		/* replay filters for the VSI */
6330 		status = ice_replay_vsi(&pf->hw, vsi->idx);
6331 		if (status) {
6332 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6333 				ice_stat_str(status), vsi->idx,
6334 				ice_vsi_type_str(type));
6335 			return -EIO;
6336 		}
6337 
6338 		/* Re-map HW VSI number, using VSI handle that has been
6339 		 * previously validated in ice_replay_vsi() call above
6340 		 */
6341 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6342 
6343 		/* enable the VSI */
6344 		err = ice_ena_vsi(vsi, false);
6345 		if (err) {
6346 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6347 				err, vsi->idx, ice_vsi_type_str(type));
6348 			return err;
6349 		}
6350 
6351 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6352 			 ice_vsi_type_str(type));
6353 	}
6354 
6355 	return 0;
6356 }
6357 
6358 /**
6359  * ice_update_pf_netdev_link - Update PF netdev link status
6360  * @pf: pointer to the PF instance
6361  */
6362 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6363 {
6364 	bool link_up;
6365 	int i;
6366 
6367 	ice_for_each_vsi(pf, i) {
6368 		struct ice_vsi *vsi = pf->vsi[i];
6369 
6370 		if (!vsi || vsi->type != ICE_VSI_PF)
6371 			return;
6372 
6373 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6374 		if (link_up) {
6375 			netif_carrier_on(pf->vsi[i]->netdev);
6376 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6377 		} else {
6378 			netif_carrier_off(pf->vsi[i]->netdev);
6379 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6380 		}
6381 	}
6382 }
6383 
6384 /**
6385  * ice_rebuild - rebuild after reset
6386  * @pf: PF to rebuild
6387  * @reset_type: type of reset
6388  *
6389  * Do not rebuild VF VSI in this flow because that is already handled via
6390  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6391  * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
6392  * to reset/rebuild all the VF VSI twice.
6393  */
6394 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6395 {
6396 	struct device *dev = ice_pf_to_dev(pf);
6397 	struct ice_hw *hw = &pf->hw;
6398 	enum ice_status ret;
6399 	int err;
6400 
6401 	if (test_bit(ICE_DOWN, pf->state))
6402 		goto clear_recovery;
6403 
6404 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6405 
6406 	ret = ice_init_all_ctrlq(hw);
6407 	if (ret) {
6408 		dev_err(dev, "control queues init failed %s\n",
6409 			ice_stat_str(ret));
6410 		goto err_init_ctrlq;
6411 	}
6412 
6413 	/* if DDP was previously loaded successfully */
6414 	if (!ice_is_safe_mode(pf)) {
6415 		/* reload the SW DB of filter tables */
6416 		if (reset_type == ICE_RESET_PFR)
6417 			ice_fill_blk_tbls(hw);
6418 		else
6419 			/* Reload DDP Package after CORER/GLOBR reset */
6420 			ice_load_pkg(NULL, pf);
6421 	}
6422 
6423 	ret = ice_clear_pf_cfg(hw);
6424 	if (ret) {
6425 		dev_err(dev, "clear PF configuration failed %s\n",
6426 			ice_stat_str(ret));
6427 		goto err_init_ctrlq;
6428 	}
6429 
6430 	if (pf->first_sw->dflt_vsi_ena)
6431 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6432 	/* clear the default VSI configuration if it exists */
6433 	pf->first_sw->dflt_vsi = NULL;
6434 	pf->first_sw->dflt_vsi_ena = false;
6435 
6436 	ice_clear_pxe_mode(hw);
6437 
6438 	ret = ice_init_nvm(hw);
6439 	if (ret) {
6440 		dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
6441 		goto err_init_ctrlq;
6442 	}
6443 
6444 	ret = ice_get_caps(hw);
6445 	if (ret) {
6446 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6447 		goto err_init_ctrlq;
6448 	}
6449 
6450 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6451 	if (ret) {
6452 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6453 		goto err_init_ctrlq;
6454 	}
6455 
6456 	err = ice_sched_init_port(hw->port_info);
6457 	if (err)
6458 		goto err_sched_init_port;
6459 
6460 	/* start misc vector */
6461 	err = ice_req_irq_msix_misc(pf);
6462 	if (err) {
6463 		dev_err(dev, "misc vector setup failed: %d\n", err);
6464 		goto err_sched_init_port;
6465 	}
6466 
6467 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6468 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6469 		if (!rd32(hw, PFQF_FD_SIZE)) {
6470 			u16 unused, guar, b_effort;
6471 
6472 			guar = hw->func_caps.fd_fltr_guar;
6473 			b_effort = hw->func_caps.fd_fltr_best_effort;
6474 
6475 			/* force guaranteed filter pool for PF */
6476 			ice_alloc_fd_guar_item(hw, &unused, guar);
6477 			/* force shared filter pool for PF */
6478 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6479 		}
6480 	}
6481 
6482 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6483 		ice_dcb_rebuild(pf);
6484 
6485 	/* If the PF previously had enabled PTP, PTP init needs to happen before
6486 	 * the VSI rebuild. If not, this causes the PTP link status events to
6487 	 * fail.
6488 	 */
6489 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6490 		ice_ptp_init(pf);
6491 
6492 	/* rebuild PF VSI */
6493 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6494 	if (err) {
6495 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6496 		goto err_vsi_rebuild;
6497 	}
6498 
6499 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
6500 	if (err) {
6501 		dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
6502 		goto err_vsi_rebuild;
6503 	}
6504 
6505 	/* If Flow Director is active */
6506 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6507 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6508 		if (err) {
6509 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6510 			goto err_vsi_rebuild;
6511 		}
6512 
6513 		/* replay HW Flow Director recipes */
6514 		if (hw->fdir_prof)
6515 			ice_fdir_replay_flows(hw);
6516 
6517 		/* replay Flow Director filters */
6518 		ice_fdir_replay_fltrs(pf);
6519 
6520 		ice_rebuild_arfs(pf);
6521 	}
6522 
6523 	ice_update_pf_netdev_link(pf);
6524 
6525 	/* tell the firmware we are up */
6526 	ret = ice_send_version(pf);
6527 	if (ret) {
6528 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6529 			ice_stat_str(ret));
6530 		goto err_vsi_rebuild;
6531 	}
6532 
6533 	ice_replay_post(hw);
6534 
6535 	/* if we get here, reset flow is successful */
6536 	clear_bit(ICE_RESET_FAILED, pf->state);
6537 
6538 	ice_plug_aux_dev(pf);
6539 	return;
6540 
6541 err_vsi_rebuild:
6542 err_sched_init_port:
6543 	ice_sched_cleanup_all(hw);
6544 err_init_ctrlq:
6545 	ice_shutdown_all_ctrlq(hw);
6546 	set_bit(ICE_RESET_FAILED, pf->state);
6547 clear_recovery:
6548 	/* set this bit in PF state to control service task scheduling */
6549 	set_bit(ICE_NEEDS_RESTART, pf->state);
6550 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6551 }
6552 
6553 /**
6554  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6555  * @vsi: Pointer to VSI structure
6556  */
6557 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6558 {
6559 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6560 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6561 	else
6562 		return ICE_RXBUF_3072;
6563 }
6564 
6565 /**
6566  * ice_change_mtu - NDO callback to change the MTU
6567  * @netdev: network interface device structure
6568  * @new_mtu: new value for maximum frame size
6569  *
6570  * Returns 0 on success, negative on failure
6571  */
6572 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6573 {
6574 	struct ice_netdev_priv *np = netdev_priv(netdev);
6575 	struct ice_vsi *vsi = np->vsi;
6576 	struct ice_pf *pf = vsi->back;
6577 	struct iidc_event *event;
6578 	u8 count = 0;
6579 	int err = 0;
6580 
6581 	if (new_mtu == (int)netdev->mtu) {
6582 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6583 		return 0;
6584 	}
6585 
6586 	if (ice_is_xdp_ena_vsi(vsi)) {
6587 		int frame_size = ice_max_xdp_frame_size(vsi);
6588 
6589 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6590 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6591 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6592 			return -EINVAL;
6593 		}
6594 	}
6595 
6596 	/* if a reset is in progress, wait for some time for it to complete */
6597 	do {
6598 		if (ice_is_reset_in_progress(pf->state)) {
6599 			count++;
6600 			usleep_range(1000, 2000);
6601 		} else {
6602 			break;
6603 		}
6604 
6605 	} while (count < 100);
6606 
6607 	if (count == 100) {
6608 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6609 		return -EBUSY;
6610 	}
6611 
6612 	event = kzalloc(sizeof(*event), GFP_KERNEL);
6613 	if (!event)
6614 		return -ENOMEM;
6615 
6616 	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6617 	ice_send_event_to_aux(pf, event);
6618 	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6619 
6620 	netdev->mtu = (unsigned int)new_mtu;
6621 
6622 	/* if VSI is up, bring it down and then back up */
6623 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6624 		err = ice_down(vsi);
6625 		if (err) {
6626 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6627 			goto event_after;
6628 		}
6629 
6630 		err = ice_up(vsi);
6631 		if (err) {
6632 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6633 			goto event_after;
6634 		}
6635 	}
6636 
6637 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6638 event_after:
6639 	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6640 	ice_send_event_to_aux(pf, event);
6641 	kfree(event);
6642 
6643 	return err;
6644 }
6645 
6646 /**
6647  * ice_eth_ioctl - Access the hwtstamp interface
6648  * @netdev: network interface device structure
6649  * @ifr: interface request data
6650  * @cmd: ioctl command
6651  */
6652 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6653 {
6654 	struct ice_netdev_priv *np = netdev_priv(netdev);
6655 	struct ice_pf *pf = np->vsi->back;
6656 
6657 	switch (cmd) {
6658 	case SIOCGHWTSTAMP:
6659 		return ice_ptp_get_ts_config(pf, ifr);
6660 	case SIOCSHWTSTAMP:
6661 		return ice_ptp_set_ts_config(pf, ifr);
6662 	default:
6663 		return -EOPNOTSUPP;
6664 	}
6665 }
6666 
6667 /**
6668  * ice_aq_str - convert AQ err code to a string
6669  * @aq_err: the AQ error code to convert
6670  */
6671 const char *ice_aq_str(enum ice_aq_err aq_err)
6672 {
6673 	switch (aq_err) {
6674 	case ICE_AQ_RC_OK:
6675 		return "OK";
6676 	case ICE_AQ_RC_EPERM:
6677 		return "ICE_AQ_RC_EPERM";
6678 	case ICE_AQ_RC_ENOENT:
6679 		return "ICE_AQ_RC_ENOENT";
6680 	case ICE_AQ_RC_ENOMEM:
6681 		return "ICE_AQ_RC_ENOMEM";
6682 	case ICE_AQ_RC_EBUSY:
6683 		return "ICE_AQ_RC_EBUSY";
6684 	case ICE_AQ_RC_EEXIST:
6685 		return "ICE_AQ_RC_EEXIST";
6686 	case ICE_AQ_RC_EINVAL:
6687 		return "ICE_AQ_RC_EINVAL";
6688 	case ICE_AQ_RC_ENOSPC:
6689 		return "ICE_AQ_RC_ENOSPC";
6690 	case ICE_AQ_RC_ENOSYS:
6691 		return "ICE_AQ_RC_ENOSYS";
6692 	case ICE_AQ_RC_EMODE:
6693 		return "ICE_AQ_RC_EMODE";
6694 	case ICE_AQ_RC_ENOSEC:
6695 		return "ICE_AQ_RC_ENOSEC";
6696 	case ICE_AQ_RC_EBADSIG:
6697 		return "ICE_AQ_RC_EBADSIG";
6698 	case ICE_AQ_RC_ESVN:
6699 		return "ICE_AQ_RC_ESVN";
6700 	case ICE_AQ_RC_EBADMAN:
6701 		return "ICE_AQ_RC_EBADMAN";
6702 	case ICE_AQ_RC_EBADBUF:
6703 		return "ICE_AQ_RC_EBADBUF";
6704 	}
6705 
6706 	return "ICE_AQ_RC_UNKNOWN";
6707 }
6708 
6709 /**
6710  * ice_stat_str - convert status err code to a string
6711  * @stat_err: the status error code to convert
6712  */
6713 const char *ice_stat_str(enum ice_status stat_err)
6714 {
6715 	switch (stat_err) {
6716 	case ICE_SUCCESS:
6717 		return "OK";
6718 	case ICE_ERR_PARAM:
6719 		return "ICE_ERR_PARAM";
6720 	case ICE_ERR_NOT_IMPL:
6721 		return "ICE_ERR_NOT_IMPL";
6722 	case ICE_ERR_NOT_READY:
6723 		return "ICE_ERR_NOT_READY";
6724 	case ICE_ERR_NOT_SUPPORTED:
6725 		return "ICE_ERR_NOT_SUPPORTED";
6726 	case ICE_ERR_BAD_PTR:
6727 		return "ICE_ERR_BAD_PTR";
6728 	case ICE_ERR_INVAL_SIZE:
6729 		return "ICE_ERR_INVAL_SIZE";
6730 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6731 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6732 	case ICE_ERR_RESET_FAILED:
6733 		return "ICE_ERR_RESET_FAILED";
6734 	case ICE_ERR_FW_API_VER:
6735 		return "ICE_ERR_FW_API_VER";
6736 	case ICE_ERR_NO_MEMORY:
6737 		return "ICE_ERR_NO_MEMORY";
6738 	case ICE_ERR_CFG:
6739 		return "ICE_ERR_CFG";
6740 	case ICE_ERR_OUT_OF_RANGE:
6741 		return "ICE_ERR_OUT_OF_RANGE";
6742 	case ICE_ERR_ALREADY_EXISTS:
6743 		return "ICE_ERR_ALREADY_EXISTS";
6744 	case ICE_ERR_NVM:
6745 		return "ICE_ERR_NVM";
6746 	case ICE_ERR_NVM_CHECKSUM:
6747 		return "ICE_ERR_NVM_CHECKSUM";
6748 	case ICE_ERR_BUF_TOO_SHORT:
6749 		return "ICE_ERR_BUF_TOO_SHORT";
6750 	case ICE_ERR_NVM_BLANK_MODE:
6751 		return "ICE_ERR_NVM_BLANK_MODE";
6752 	case ICE_ERR_IN_USE:
6753 		return "ICE_ERR_IN_USE";
6754 	case ICE_ERR_MAX_LIMIT:
6755 		return "ICE_ERR_MAX_LIMIT";
6756 	case ICE_ERR_RESET_ONGOING:
6757 		return "ICE_ERR_RESET_ONGOING";
6758 	case ICE_ERR_HW_TABLE:
6759 		return "ICE_ERR_HW_TABLE";
6760 	case ICE_ERR_DOES_NOT_EXIST:
6761 		return "ICE_ERR_DOES_NOT_EXIST";
6762 	case ICE_ERR_FW_DDP_MISMATCH:
6763 		return "ICE_ERR_FW_DDP_MISMATCH";
6764 	case ICE_ERR_AQ_ERROR:
6765 		return "ICE_ERR_AQ_ERROR";
6766 	case ICE_ERR_AQ_TIMEOUT:
6767 		return "ICE_ERR_AQ_TIMEOUT";
6768 	case ICE_ERR_AQ_FULL:
6769 		return "ICE_ERR_AQ_FULL";
6770 	case ICE_ERR_AQ_NO_WORK:
6771 		return "ICE_ERR_AQ_NO_WORK";
6772 	case ICE_ERR_AQ_EMPTY:
6773 		return "ICE_ERR_AQ_EMPTY";
6774 	case ICE_ERR_AQ_FW_CRITICAL:
6775 		return "ICE_ERR_AQ_FW_CRITICAL";
6776 	}
6777 
6778 	return "ICE_ERR_UNKNOWN";
6779 }
6780 
6781 /**
6782  * ice_set_rss_lut - Set RSS LUT
6783  * @vsi: Pointer to VSI structure
6784  * @lut: Lookup table
6785  * @lut_size: Lookup table size
6786  *
6787  * Returns 0 on success, negative on failure
6788  */
6789 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6790 {
6791 	struct ice_aq_get_set_rss_lut_params params = {};
6792 	struct ice_hw *hw = &vsi->back->hw;
6793 	enum ice_status status;
6794 
6795 	if (!lut)
6796 		return -EINVAL;
6797 
6798 	params.vsi_handle = vsi->idx;
6799 	params.lut_size = lut_size;
6800 	params.lut_type = vsi->rss_lut_type;
6801 	params.lut = lut;
6802 
6803 	status = ice_aq_set_rss_lut(hw, &params);
6804 	if (status) {
6805 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6806 			ice_stat_str(status),
6807 			ice_aq_str(hw->adminq.sq_last_status));
6808 		return -EIO;
6809 	}
6810 
6811 	return 0;
6812 }
6813 
6814 /**
6815  * ice_set_rss_key - Set RSS key
6816  * @vsi: Pointer to the VSI structure
6817  * @seed: RSS hash seed
6818  *
6819  * Returns 0 on success, negative on failure
6820  */
6821 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6822 {
6823 	struct ice_hw *hw = &vsi->back->hw;
6824 	enum ice_status status;
6825 
6826 	if (!seed)
6827 		return -EINVAL;
6828 
6829 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6830 	if (status) {
6831 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6832 			ice_stat_str(status),
6833 			ice_aq_str(hw->adminq.sq_last_status));
6834 		return -EIO;
6835 	}
6836 
6837 	return 0;
6838 }
6839 
6840 /**
6841  * ice_get_rss_lut - Get RSS LUT
6842  * @vsi: Pointer to VSI structure
6843  * @lut: Buffer to store the lookup table entries
6844  * @lut_size: Size of buffer to store the lookup table entries
6845  *
6846  * Returns 0 on success, negative on failure
6847  */
6848 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6849 {
6850 	struct ice_aq_get_set_rss_lut_params params = {};
6851 	struct ice_hw *hw = &vsi->back->hw;
6852 	enum ice_status status;
6853 
6854 	if (!lut)
6855 		return -EINVAL;
6856 
6857 	params.vsi_handle = vsi->idx;
6858 	params.lut_size = lut_size;
6859 	params.lut_type = vsi->rss_lut_type;
6860 	params.lut = lut;
6861 
6862 	status = ice_aq_get_rss_lut(hw, &params);
6863 	if (status) {
6864 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
6865 			ice_stat_str(status),
6866 			ice_aq_str(hw->adminq.sq_last_status));
6867 		return -EIO;
6868 	}
6869 
6870 	return 0;
6871 }
6872 
6873 /**
6874  * ice_get_rss_key - Get RSS key
6875  * @vsi: Pointer to VSI structure
6876  * @seed: Buffer to store the key in
6877  *
6878  * Returns 0 on success, negative on failure
6879  */
6880 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
6881 {
6882 	struct ice_hw *hw = &vsi->back->hw;
6883 	enum ice_status status;
6884 
6885 	if (!seed)
6886 		return -EINVAL;
6887 
6888 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6889 	if (status) {
6890 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
6891 			ice_stat_str(status),
6892 			ice_aq_str(hw->adminq.sq_last_status));
6893 		return -EIO;
6894 	}
6895 
6896 	return 0;
6897 }
6898 
6899 /**
6900  * ice_bridge_getlink - Get the hardware bridge mode
6901  * @skb: skb buff
6902  * @pid: process ID
6903  * @seq: RTNL message seq
6904  * @dev: the netdev being configured
6905  * @filter_mask: filter mask passed in
6906  * @nlflags: netlink flags passed in
6907  *
6908  * Return the bridge mode (VEB/VEPA)
6909  */
6910 static int
6911 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6912 		   struct net_device *dev, u32 filter_mask, int nlflags)
6913 {
6914 	struct ice_netdev_priv *np = netdev_priv(dev);
6915 	struct ice_vsi *vsi = np->vsi;
6916 	struct ice_pf *pf = vsi->back;
6917 	u16 bmode;
6918 
6919 	bmode = pf->first_sw->bridge_mode;
6920 
6921 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6922 				       filter_mask, NULL);
6923 }
6924 
6925 /**
6926  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6927  * @vsi: Pointer to VSI structure
6928  * @bmode: Hardware bridge mode (VEB/VEPA)
6929  *
6930  * Returns 0 on success, negative on failure
6931  */
6932 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6933 {
6934 	struct ice_aqc_vsi_props *vsi_props;
6935 	struct ice_hw *hw = &vsi->back->hw;
6936 	struct ice_vsi_ctx *ctxt;
6937 	enum ice_status status;
6938 	int ret = 0;
6939 
6940 	vsi_props = &vsi->info;
6941 
6942 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6943 	if (!ctxt)
6944 		return -ENOMEM;
6945 
6946 	ctxt->info = vsi->info;
6947 
6948 	if (bmode == BRIDGE_MODE_VEB)
6949 		/* change from VEPA to VEB mode */
6950 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6951 	else
6952 		/* change from VEB to VEPA mode */
6953 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6954 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6955 
6956 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6957 	if (status) {
6958 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6959 			bmode, ice_stat_str(status),
6960 			ice_aq_str(hw->adminq.sq_last_status));
6961 		ret = -EIO;
6962 		goto out;
6963 	}
6964 	/* Update sw flags for book keeping */
6965 	vsi_props->sw_flags = ctxt->info.sw_flags;
6966 
6967 out:
6968 	kfree(ctxt);
6969 	return ret;
6970 }
6971 
6972 /**
6973  * ice_bridge_setlink - Set the hardware bridge mode
6974  * @dev: the netdev being configured
6975  * @nlh: RTNL message
6976  * @flags: bridge setlink flags
6977  * @extack: netlink extended ack
6978  *
6979  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6980  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
6981  * not already set for all VSIs connected to this switch. And also update the
6982  * unicast switch filter rules for the corresponding switch of the netdev.
6983  */
6984 static int
6985 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6986 		   u16 __always_unused flags,
6987 		   struct netlink_ext_ack __always_unused *extack)
6988 {
6989 	struct ice_netdev_priv *np = netdev_priv(dev);
6990 	struct ice_pf *pf = np->vsi->back;
6991 	struct nlattr *attr, *br_spec;
6992 	struct ice_hw *hw = &pf->hw;
6993 	enum ice_status status;
6994 	struct ice_sw *pf_sw;
6995 	int rem, v, err = 0;
6996 
6997 	pf_sw = pf->first_sw;
6998 	/* find the attribute in the netlink message */
6999 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7000 
7001 	nla_for_each_nested(attr, br_spec, rem) {
7002 		__u16 mode;
7003 
7004 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7005 			continue;
7006 		mode = nla_get_u16(attr);
7007 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7008 			return -EINVAL;
7009 		/* Continue  if bridge mode is not being flipped */
7010 		if (mode == pf_sw->bridge_mode)
7011 			continue;
7012 		/* Iterates through the PF VSI list and update the loopback
7013 		 * mode of the VSI
7014 		 */
7015 		ice_for_each_vsi(pf, v) {
7016 			if (!pf->vsi[v])
7017 				continue;
7018 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7019 			if (err)
7020 				return err;
7021 		}
7022 
7023 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7024 		/* Update the unicast switch filter rules for the corresponding
7025 		 * switch of the netdev
7026 		 */
7027 		status = ice_update_sw_rule_bridge_mode(hw);
7028 		if (status) {
7029 			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
7030 				   mode, ice_stat_str(status),
7031 				   ice_aq_str(hw->adminq.sq_last_status));
7032 			/* revert hw->evb_veb */
7033 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7034 			return -EIO;
7035 		}
7036 
7037 		pf_sw->bridge_mode = mode;
7038 	}
7039 
7040 	return 0;
7041 }
7042 
7043 /**
7044  * ice_tx_timeout - Respond to a Tx Hang
7045  * @netdev: network interface device structure
7046  * @txqueue: Tx queue
7047  */
7048 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7049 {
7050 	struct ice_netdev_priv *np = netdev_priv(netdev);
7051 	struct ice_tx_ring *tx_ring = NULL;
7052 	struct ice_vsi *vsi = np->vsi;
7053 	struct ice_pf *pf = vsi->back;
7054 	u32 i;
7055 
7056 	pf->tx_timeout_count++;
7057 
7058 	/* Check if PFC is enabled for the TC to which the queue belongs
7059 	 * to. If yes then Tx timeout is not caused by a hung queue, no
7060 	 * need to reset and rebuild
7061 	 */
7062 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7063 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7064 			 txqueue);
7065 		return;
7066 	}
7067 
7068 	/* now that we have an index, find the tx_ring struct */
7069 	ice_for_each_txq(vsi, i)
7070 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7071 			if (txqueue == vsi->tx_rings[i]->q_index) {
7072 				tx_ring = vsi->tx_rings[i];
7073 				break;
7074 			}
7075 
7076 	/* Reset recovery level if enough time has elapsed after last timeout.
7077 	 * Also ensure no new reset action happens before next timeout period.
7078 	 */
7079 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7080 		pf->tx_timeout_recovery_level = 1;
7081 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7082 				       netdev->watchdog_timeo)))
7083 		return;
7084 
7085 	if (tx_ring) {
7086 		struct ice_hw *hw = &pf->hw;
7087 		u32 head, val = 0;
7088 
7089 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7090 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7091 		/* Read interrupt register */
7092 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7093 
7094 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7095 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7096 			    head, tx_ring->next_to_use, val);
7097 	}
7098 
7099 	pf->tx_timeout_last_recovery = jiffies;
7100 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7101 		    pf->tx_timeout_recovery_level, txqueue);
7102 
7103 	switch (pf->tx_timeout_recovery_level) {
7104 	case 1:
7105 		set_bit(ICE_PFR_REQ, pf->state);
7106 		break;
7107 	case 2:
7108 		set_bit(ICE_CORER_REQ, pf->state);
7109 		break;
7110 	case 3:
7111 		set_bit(ICE_GLOBR_REQ, pf->state);
7112 		break;
7113 	default:
7114 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7115 		set_bit(ICE_DOWN, pf->state);
7116 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7117 		set_bit(ICE_SERVICE_DIS, pf->state);
7118 		break;
7119 	}
7120 
7121 	ice_service_task_schedule(pf);
7122 	pf->tx_timeout_recovery_level++;
7123 }
7124 
7125 /**
7126  * ice_setup_tc_cls_flower - flower classifier offloads
7127  * @np: net device to configure
7128  * @filter_dev: device on which filter is added
7129  * @cls_flower: offload data
7130  */
7131 static int
7132 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7133 			struct net_device *filter_dev,
7134 			struct flow_cls_offload *cls_flower)
7135 {
7136 	struct ice_vsi *vsi = np->vsi;
7137 
7138 	if (cls_flower->common.chain_index)
7139 		return -EOPNOTSUPP;
7140 
7141 	switch (cls_flower->command) {
7142 	case FLOW_CLS_REPLACE:
7143 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7144 	case FLOW_CLS_DESTROY:
7145 		return ice_del_cls_flower(vsi, cls_flower);
7146 	default:
7147 		return -EINVAL;
7148 	}
7149 }
7150 
7151 /**
7152  * ice_setup_tc_block_cb - callback handler registered for TC block
7153  * @type: TC SETUP type
7154  * @type_data: TC flower offload data that contains user input
7155  * @cb_priv: netdev private data
7156  */
7157 static int
7158 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7159 {
7160 	struct ice_netdev_priv *np = cb_priv;
7161 
7162 	switch (type) {
7163 	case TC_SETUP_CLSFLOWER:
7164 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7165 					       type_data);
7166 	default:
7167 		return -EOPNOTSUPP;
7168 	}
7169 }
7170 
7171 static LIST_HEAD(ice_block_cb_list);
7172 
7173 static int
7174 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
7175 	     void *type_data)
7176 {
7177 	struct ice_netdev_priv *np = netdev_priv(netdev);
7178 
7179 	switch (type) {
7180 	case TC_SETUP_BLOCK:
7181 		return flow_block_cb_setup_simple(type_data,
7182 						  &ice_block_cb_list,
7183 						  ice_setup_tc_block_cb,
7184 						  np, np, true);
7185 	default:
7186 		return -EOPNOTSUPP;
7187 	}
7188 	return -EOPNOTSUPP;
7189 }
7190 
7191 /**
7192  * ice_open - Called when a network interface becomes active
7193  * @netdev: network interface device structure
7194  *
7195  * The open entry point is called when a network interface is made
7196  * active by the system (IFF_UP). At this point all resources needed
7197  * for transmit and receive operations are allocated, the interrupt
7198  * handler is registered with the OS, the netdev watchdog is enabled,
7199  * and the stack is notified that the interface is ready.
7200  *
7201  * Returns 0 on success, negative value on failure
7202  */
7203 int ice_open(struct net_device *netdev)
7204 {
7205 	struct ice_netdev_priv *np = netdev_priv(netdev);
7206 	struct ice_pf *pf = np->vsi->back;
7207 
7208 	if (ice_is_reset_in_progress(pf->state)) {
7209 		netdev_err(netdev, "can't open net device while reset is in progress");
7210 		return -EBUSY;
7211 	}
7212 
7213 	return ice_open_internal(netdev);
7214 }
7215 
7216 /**
7217  * ice_open_internal - Called when a network interface becomes active
7218  * @netdev: network interface device structure
7219  *
7220  * Internal ice_open implementation. Should not be used directly except for ice_open and reset
7221  * handling routine
7222  *
7223  * Returns 0 on success, negative value on failure
7224  */
7225 int ice_open_internal(struct net_device *netdev)
7226 {
7227 	struct ice_netdev_priv *np = netdev_priv(netdev);
7228 	struct ice_vsi *vsi = np->vsi;
7229 	struct ice_pf *pf = vsi->back;
7230 	struct ice_port_info *pi;
7231 	enum ice_status status;
7232 	int err;
7233 
7234 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
7235 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
7236 		return -EIO;
7237 	}
7238 
7239 	netif_carrier_off(netdev);
7240 
7241 	pi = vsi->port_info;
7242 	status = ice_update_link_info(pi);
7243 	if (status) {
7244 		netdev_err(netdev, "Failed to get link info, error %s\n",
7245 			   ice_stat_str(status));
7246 		return -EIO;
7247 	}
7248 
7249 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
7250 
7251 	/* Set PHY if there is media, otherwise, turn off PHY */
7252 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
7253 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7254 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
7255 			err = ice_init_phy_user_cfg(pi);
7256 			if (err) {
7257 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
7258 					   err);
7259 				return err;
7260 			}
7261 		}
7262 
7263 		err = ice_configure_phy(vsi);
7264 		if (err) {
7265 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
7266 				   err);
7267 			return err;
7268 		}
7269 	} else {
7270 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7271 		ice_set_link(vsi, false);
7272 	}
7273 
7274 	err = ice_vsi_open(vsi);
7275 	if (err)
7276 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
7277 			   vsi->vsi_num, vsi->vsw->sw_id);
7278 
7279 	/* Update existing tunnels information */
7280 	udp_tunnel_get_rx_info(netdev);
7281 
7282 	return err;
7283 }
7284 
7285 /**
7286  * ice_stop - Disables a network interface
7287  * @netdev: network interface device structure
7288  *
7289  * The stop entry point is called when an interface is de-activated by the OS,
7290  * and the netdevice enters the DOWN state. The hardware is still under the
7291  * driver's control, but the netdev interface is disabled.
7292  *
7293  * Returns success only - not allowed to fail
7294  */
7295 int ice_stop(struct net_device *netdev)
7296 {
7297 	struct ice_netdev_priv *np = netdev_priv(netdev);
7298 	struct ice_vsi *vsi = np->vsi;
7299 	struct ice_pf *pf = vsi->back;
7300 
7301 	if (ice_is_reset_in_progress(pf->state)) {
7302 		netdev_err(netdev, "can't stop net device while reset is in progress");
7303 		return -EBUSY;
7304 	}
7305 
7306 	ice_vsi_close(vsi);
7307 
7308 	return 0;
7309 }
7310 
7311 /**
7312  * ice_features_check - Validate encapsulated packet conforms to limits
7313  * @skb: skb buffer
7314  * @netdev: This port's netdev
7315  * @features: Offload features that the stack believes apply
7316  */
7317 static netdev_features_t
7318 ice_features_check(struct sk_buff *skb,
7319 		   struct net_device __always_unused *netdev,
7320 		   netdev_features_t features)
7321 {
7322 	size_t len;
7323 
7324 	/* No point in doing any of this if neither checksum nor GSO are
7325 	 * being requested for this frame. We can rule out both by just
7326 	 * checking for CHECKSUM_PARTIAL
7327 	 */
7328 	if (skb->ip_summed != CHECKSUM_PARTIAL)
7329 		return features;
7330 
7331 	/* We cannot support GSO if the MSS is going to be less than
7332 	 * 64 bytes. If it is then we need to drop support for GSO.
7333 	 */
7334 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
7335 		features &= ~NETIF_F_GSO_MASK;
7336 
7337 	len = skb_network_header(skb) - skb->data;
7338 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
7339 		goto out_rm_features;
7340 
7341 	len = skb_transport_header(skb) - skb_network_header(skb);
7342 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7343 		goto out_rm_features;
7344 
7345 	if (skb->encapsulation) {
7346 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
7347 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
7348 			goto out_rm_features;
7349 
7350 		len = skb_inner_transport_header(skb) -
7351 		      skb_inner_network_header(skb);
7352 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7353 			goto out_rm_features;
7354 	}
7355 
7356 	return features;
7357 out_rm_features:
7358 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
7359 }
7360 
7361 static const struct net_device_ops ice_netdev_safe_mode_ops = {
7362 	.ndo_open = ice_open,
7363 	.ndo_stop = ice_stop,
7364 	.ndo_start_xmit = ice_start_xmit,
7365 	.ndo_set_mac_address = ice_set_mac_address,
7366 	.ndo_validate_addr = eth_validate_addr,
7367 	.ndo_change_mtu = ice_change_mtu,
7368 	.ndo_get_stats64 = ice_get_stats64,
7369 	.ndo_tx_timeout = ice_tx_timeout,
7370 	.ndo_bpf = ice_xdp_safe_mode,
7371 };
7372 
7373 static const struct net_device_ops ice_netdev_ops = {
7374 	.ndo_open = ice_open,
7375 	.ndo_stop = ice_stop,
7376 	.ndo_start_xmit = ice_start_xmit,
7377 	.ndo_select_queue = ice_select_queue,
7378 	.ndo_features_check = ice_features_check,
7379 	.ndo_set_rx_mode = ice_set_rx_mode,
7380 	.ndo_set_mac_address = ice_set_mac_address,
7381 	.ndo_validate_addr = eth_validate_addr,
7382 	.ndo_change_mtu = ice_change_mtu,
7383 	.ndo_get_stats64 = ice_get_stats64,
7384 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
7385 	.ndo_eth_ioctl = ice_eth_ioctl,
7386 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
7387 	.ndo_set_vf_mac = ice_set_vf_mac,
7388 	.ndo_get_vf_config = ice_get_vf_cfg,
7389 	.ndo_set_vf_trust = ice_set_vf_trust,
7390 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
7391 	.ndo_set_vf_link_state = ice_set_vf_link_state,
7392 	.ndo_get_vf_stats = ice_get_vf_stats,
7393 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
7394 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
7395 	.ndo_setup_tc = ice_setup_tc,
7396 	.ndo_set_features = ice_set_features,
7397 	.ndo_bridge_getlink = ice_bridge_getlink,
7398 	.ndo_bridge_setlink = ice_bridge_setlink,
7399 	.ndo_fdb_add = ice_fdb_add,
7400 	.ndo_fdb_del = ice_fdb_del,
7401 #ifdef CONFIG_RFS_ACCEL
7402 	.ndo_rx_flow_steer = ice_rx_flow_steer,
7403 #endif
7404 	.ndo_tx_timeout = ice_tx_timeout,
7405 	.ndo_bpf = ice_xdp,
7406 	.ndo_xdp_xmit = ice_xdp_xmit,
7407 	.ndo_xsk_wakeup = ice_xsk_wakeup,
7408 };
7409