// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static DEFINE_IDA(ice_aux_ida);

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static int ice_vsi_open(struct ice_vsi *vsi);

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
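
/* Worked example of the wraparound arithmetic above (illustrative values
 * only): with ring->count = 8, next_to_clean = 6 and next_to_use = 2, the
 * tail has wrapped past the end of the ring, so ice_get_tx_pending()
 * returns 2 + 8 - 6 = 4 pending descriptors.
 */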

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
			    ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Program the initial set of MAC filters for the PF VSI: a filter for the
 * permanent MAC address and one for the broadcast address. If an error is
 * encountered, the netdevice will be unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	enum ice_status status;
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
	if (status)
		return -EIO;

	return 0;
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc.). This function only
 * populates the tmp_sync_list, which is later used by ice_fltr_add_mac_list
 * to add the MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc.). This function only
 * populates the tmp_unsync_list, which is later used by
 * ice_fltr_remove_mac_list to delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}

/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->num_vlan > 1) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not enter
	 * the 'if' block below and report it as an error; instead continue
	 * processing the rest of the function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				ice_cfg_vlan_pruning(vsi, false, false);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					ice_cfg_vlan_pruning(vsi, true, false);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_release(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf, true);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
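	/* Sample of the resulting log line, with illustrative values only:
	 * "NIC Link is up 25 Gbps Full Duplex, Requested FEC: RS-FEC,
	 * Negotiated FEC: RS-FEC, Autoneg Advertised: On, Autoneg
	 * Negotiated: True, Flow Control: Rx/Tx"
	 */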
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore, an error state in this function should be logged with a debug
 * message, and the driver should continue on with rebuild/re-enable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);
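	/* The statement above advances tlv to the next TLV: 2 bytes of
	 * typelen header plus the payload length encoded in typelen. As an
	 * illustration, assuming ICE_IEEE_ETS_TLV_LEN is 25 (the IEEE
	 * 802.1Qaz ETS TLV length), the next TLV starts 2 + 25 = 27 bytes
	 * later, and offset accumulates the same totals for the final
	 * ice_aq_set_lldp_mib() call.
	 */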

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * Check the module power level returned by a previous call to
 * aq_get_link_info and print error messages if the module power level is
 * not supported.
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure; other bookkeeping still needs to be done
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
			pi->lport, ice_stat_str(status),
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

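	/* The event mask appears to disable the events whose bits are set,
	 * so the bitmap is inverted below to leave only the link up/down,
	 * media-not-available and module-qualification-failure events
	 * enabled.
	 */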
	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
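
/* Illustrative usage sketch (hypothetical caller; the buffer size, opcode
 * and timeout are arbitrary example choices, not values mandated by this
 * driver):
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	event.buf_len = 1024;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return -ENOMEM;
 *
 *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_lldp_set_mib_change,
 *				    msecs_to_jiffies(500), &event);
 *	if (err)
 *		goto cleanup;	// timed out, canceled or interrupted
 *	// event.desc and event.msg_buf now hold the firmware response
 */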

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
				ice_stat_str(ret));
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
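
/* For example (illustrative values): if cq->rq.next_to_clean is 3 while the
 * head register reads 5, two received descriptors have not been cleaned yet,
 * so ice_ctrlq_pending() returns true.
 */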

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume flows (e.g. the WoL
 * scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 i);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(&pf->vf[i], false);
			}
		}
	}

	ice_print_vfs_mdd_events(pf);
}

/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * the link state changes, a link event will be triggered because both the
 * Enable Automatic Link Update and LESM Enable bits are set when setting the
 * PHY capabilities.
1699  *
1700  * Returns 0 on success, negative on failure
1701  */
1702 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1703 {
1704 	struct ice_aqc_get_phy_caps_data *pcaps;
1705 	struct ice_aqc_set_phy_cfg_data *cfg;
1706 	struct ice_port_info *pi;
1707 	struct device *dev;
1708 	int retcode;
1709 
1710 	if (!vsi || !vsi->port_info || !vsi->back)
1711 		return -EINVAL;
1712 	if (vsi->type != ICE_VSI_PF)
1713 		return 0;
1714 
1715 	dev = ice_pf_to_dev(vsi->back);
1716 
1717 	pi = vsi->port_info;
1718 
1719 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1720 	if (!pcaps)
1721 		return -ENOMEM;
1722 
1723 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1724 				      NULL);
1725 	if (retcode) {
1726 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1727 			vsi->vsi_num, retcode);
1728 		retcode = -EIO;
1729 		goto out;
1730 	}
1731 
1732 	/* No change in link */
1733 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1734 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1735 		goto out;
1736 
1737 	/* Use the current user PHY configuration. The current user PHY
1738 	 * configuration is initialized during probe from PHY capabilities
1739 	 * software mode, and updated on set PHY configuration.
1740 	 */
1741 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1742 	if (!cfg) {
1743 		retcode = -ENOMEM;
1744 		goto out;
1745 	}
1746 
1747 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1748 	if (link_up)
1749 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1750 	else
1751 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1752 
1753 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1754 	if (retcode) {
1755 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1756 			vsi->vsi_num, retcode);
1757 		retcode = -EIO;
1758 	}
1759 
1760 	kfree(cfg);
1761 out:
1762 	kfree(pcaps);
1763 	return retcode;
1764 }
1765 
1766 /**
1767  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1768  * @pi: port info structure
1769  *
1770  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1771  */
1772 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1773 {
1774 	struct ice_aqc_get_phy_caps_data *pcaps;
1775 	struct ice_pf *pf = pi->hw->back;
1776 	enum ice_status status;
1777 	int err = 0;
1778 
1779 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1780 	if (!pcaps)
1781 		return -ENOMEM;
1782 
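	/* report PHY types from the NVM regardless of media presence; these
	 * are later used to mask the link default override PHY configuration
	 */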
1783 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1784 				     NULL);
1785 
1786 	if (status) {
1787 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1788 		err = -EIO;
1789 		goto out;
1790 	}
1791 
1792 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1793 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1794 
1795 out:
1796 	kfree(pcaps);
1797 	return err;
1798 }
1799 
1800 /**
1801  * ice_init_link_dflt_override - Initialize link default override
1802  * @pi: port info structure
1803  *
1804  * Initialize link default override and PHY total port shutdown during probe
1805  */
1806 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1807 {
1808 	struct ice_link_default_override_tlv *ldo;
1809 	struct ice_pf *pf = pi->hw->back;
1810 
1811 	ldo = &pf->link_dflt_override;
1812 	if (ice_get_link_default_override(ldo, pi))
1813 		return;
1814 
1815 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1816 		return;
1817 
1818 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1819 	 * ethtool private flag) for ports with Port Disable bit set.
1820 	 */
1821 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1822 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1823 }
1824 
1825 /**
1826  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1827  * @pi: port info structure
1828  *
1829  * If default override is enabled, initialize the user PHY cfg speed and FEC
1830  * settings using the default override mask from the NVM.
1831  *
1832  * The PHY should only be configured with the default override settings the
1833  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1834  * is used to indicate that the user PHY cfg default override is initialized
1835  * and the PHY has not been configured with the default override settings. The
1836  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1837  * configured.
1838  *
1839  * This function should be called only if the FW doesn't support default
1840  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1841  */
1842 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1843 {
1844 	struct ice_link_default_override_tlv *ldo;
1845 	struct ice_aqc_set_phy_cfg_data *cfg;
1846 	struct ice_phy_info *phy = &pi->phy;
1847 	struct ice_pf *pf = pi->hw->back;
1848 
1849 	ldo = &pf->link_dflt_override;
1850 
	/* If link default override is enabled, use it to mask the NVM PHY
	 * capabilities for the speed and FEC default configuration.
1853 	 */
1854 	cfg = &phy->curr_user_phy_cfg;
1855 
1856 	if (ldo->phy_type_low || ldo->phy_type_high) {
1857 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1858 				    cpu_to_le64(ldo->phy_type_low);
1859 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1860 				     cpu_to_le64(ldo->phy_type_high);
1861 	}
1862 	cfg->link_fec_opt = ldo->fec_options;
1863 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1864 
1865 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1866 }
1867 
1868 /**
1869  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1870  * @pi: port info structure
1871  *
 * Initialize the current user PHY configuration and the requested speed, FEC,
 * and FC modes to their defaults. The PHY defaults come from the Get PHY
 * Capabilities response (topology with media), so call this only when media is
 * first available. An error is returned if called when media is not available.
 * The PHY initialization completed state is set here.
 *
 * These settings are used when setting the PHY configuration. The user PHY
 * configuration is updated whenever the PHY configuration is set. Returns 0 on
 * success, negative on failure.
1881  */
1882 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1883 {
1884 	struct ice_aqc_get_phy_caps_data *pcaps;
1885 	struct ice_phy_info *phy = &pi->phy;
1886 	struct ice_pf *pf = pi->hw->back;
1887 	enum ice_status status;
1888 	int err = 0;
1889 
1890 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1891 		return -EIO;
1892 
1893 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1894 	if (!pcaps)
1895 		return -ENOMEM;
1896 
1897 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1898 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1899 					     pcaps, NULL);
1900 	else
1901 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1902 					     pcaps, NULL);
1903 	if (status) {
1904 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1905 		err = -EIO;
1906 		goto err_out;
1907 	}
1908 
1909 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1910 
1911 	/* check if lenient mode is supported and enabled */
1912 	if (ice_fw_supports_link_override(pi->hw) &&
1913 	    !(pcaps->module_compliance_enforcement &
1914 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1915 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1916 
1917 		/* if the FW supports default PHY configuration mode, then the driver
1918 		 * does not have to apply link override settings. If not,
1919 		 * initialize user PHY configuration with link override values
1920 		 */
1921 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1922 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1923 			ice_init_phy_cfg_dflt_override(pi);
1924 			goto out;
1925 		}
1926 	}
1927 
1928 	/* if link default override is not enabled, set user flow control and
1929 	 * FEC settings based on what get_phy_caps returned
1930 	 */
1931 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1932 						      pcaps->link_fec_options);
1933 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1934 
1935 out:
1936 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1937 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1938 err_out:
1939 	kfree(pcaps);
1940 	return err;
1941 }
1942 
1943 /**
1944  * ice_configure_phy - configure PHY
1945  * @vsi: VSI of PHY
1946  *
1947  * Set the PHY configuration. If the current PHY configuration is the same as
 * the curr_user_phy_cfg, then do nothing to avoid a link flap. Otherwise,
 * configure the PHY based on the Get PHY Capabilities for topology with media.
1950  */
1951 static int ice_configure_phy(struct ice_vsi *vsi)
1952 {
1953 	struct device *dev = ice_pf_to_dev(vsi->back);
1954 	struct ice_port_info *pi = vsi->port_info;
1955 	struct ice_aqc_get_phy_caps_data *pcaps;
1956 	struct ice_aqc_set_phy_cfg_data *cfg;
1957 	struct ice_phy_info *phy = &pi->phy;
1958 	struct ice_pf *pf = vsi->back;
1959 	enum ice_status status;
1960 	int err = 0;
1961 
1962 	/* Ensure we have media as we cannot configure a medialess port */
1963 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1964 		return -EPERM;
1965 
1966 	ice_print_topo_conflict(vsi);
1967 
1968 	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1969 		return -EPERM;
1970 
1971 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1972 		return ice_force_phys_link_state(vsi, true);
1973 
1974 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1975 	if (!pcaps)
1976 		return -ENOMEM;
1977 
1978 	/* Get current PHY config */
1979 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1980 				     NULL);
1981 	if (status) {
1982 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1983 			vsi->vsi_num, ice_stat_str(status));
1984 		err = -EIO;
1985 		goto done;
1986 	}
1987 
1988 	/* If PHY enable link is configured and configuration has not changed,
1989 	 * there's nothing to do
1990 	 */
1991 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1992 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1993 		goto done;
1994 
1995 	/* Use PHY topology as baseline for configuration */
1996 	memset(pcaps, 0, sizeof(*pcaps));
1997 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1998 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1999 					     pcaps, NULL);
2000 	else
2001 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2002 					     pcaps, NULL);
2003 	if (status) {
2004 		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
2005 			vsi->vsi_num, ice_stat_str(status));
2006 		err = -EIO;
2007 		goto done;
2008 	}
2009 
2010 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2011 	if (!cfg) {
2012 		err = -ENOMEM;
2013 		goto done;
2014 	}
2015 
2016 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2017 
2018 	/* Speed - If default override pending, use curr_user_phy_cfg set in
	 * ice_init_phy_cfg_dflt_override.
2020 	 */
2021 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2022 			       vsi->back->state)) {
2023 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2024 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2025 	} else {
2026 		u64 phy_low = 0, phy_high = 0;
2027 
2028 		ice_update_phy_type(&phy_low, &phy_high,
2029 				    pi->phy.curr_user_speed_req);
2030 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2031 		cfg->phy_type_high = pcaps->phy_type_high &
2032 				     cpu_to_le64(phy_high);
2033 	}
2034 
2035 	/* Can't provide what was requested; use PHY capabilities */
2036 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2037 		cfg->phy_type_low = pcaps->phy_type_low;
2038 		cfg->phy_type_high = pcaps->phy_type_high;
2039 	}
2040 
2041 	/* FEC */
2042 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2043 
2044 	/* Can't provide what was requested; use PHY capabilities */
2045 	if (cfg->link_fec_opt !=
2046 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2047 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2048 		cfg->link_fec_opt = pcaps->link_fec_options;
2049 	}
2050 
2051 	/* Flow Control - always supported; no need to check against
2052 	 * capabilities
2053 	 */
2054 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2055 
2056 	/* Enable link and link update */
2057 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2058 
2059 	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2060 	if (status) {
2061 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
2062 			vsi->vsi_num, ice_stat_str(status));
2063 		err = -EIO;
2064 	}
2065 
2066 	kfree(cfg);
2067 done:
2068 	kfree(pcaps);
2069 	return err;
2070 }
2071 
2072 /**
2073  * ice_check_media_subtask - Check for media
2074  * @pf: pointer to PF struct
2075  *
 * If media is available, initialize the PHY user configuration if that has
 * not been done yet, and configure the PHY if the interface is up.
2078  */
2079 static void ice_check_media_subtask(struct ice_pf *pf)
2080 {
2081 	struct ice_port_info *pi;
2082 	struct ice_vsi *vsi;
2083 	int err;
2084 
2085 	/* No need to check for media if it's already present */
2086 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2087 		return;
2088 
2089 	vsi = ice_get_main_vsi(pf);
2090 	if (!vsi)
2091 		return;
2092 
2093 	/* Refresh link info and check if media is present */
2094 	pi = vsi->port_info;
2095 	err = ice_update_link_info(pi);
2096 	if (err)
2097 		return;
2098 
2099 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
2100 
2101 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2102 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2103 			ice_init_phy_user_cfg(pi);
2104 
2105 		/* PHY settings are reset on media insertion, reconfigure
2106 		 * PHY to preserve settings.
2107 		 */
2108 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2109 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2110 			return;
2111 
2112 		err = ice_configure_phy(vsi);
2113 		if (!err)
2114 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2115 
2116 		/* A Link Status Event will be generated; the event handler
2117 		 * will complete bringing the interface up
2118 		 */
2119 	}
2120 }
2121 
2122 /**
2123  * ice_service_task - manage and run subtasks
2124  * @work: pointer to work_struct contained by the PF struct
2125  */
2126 static void ice_service_task(struct work_struct *work)
2127 {
2128 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2129 	unsigned long start_time = jiffies;
2130 
2131 	/* subtasks */
2132 
2133 	/* process reset requests first */
2134 	ice_reset_subtask(pf);
2135 
2136 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2137 	if (ice_is_reset_in_progress(pf->state) ||
2138 	    test_bit(ICE_SUSPENDED, pf->state) ||
2139 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2140 		ice_service_task_complete(pf);
2141 		return;
2142 	}
2143 
2144 	ice_clean_adminq_subtask(pf);
2145 	ice_check_media_subtask(pf);
2146 	ice_check_for_hang_subtask(pf);
2147 	ice_sync_fltr_subtask(pf);
2148 	ice_handle_mdd_event(pf);
2149 	ice_watchdog_subtask(pf);
2150 
2151 	if (ice_is_safe_mode(pf)) {
2152 		ice_service_task_complete(pf);
2153 		return;
2154 	}
2155 
2156 	ice_process_vflr_event(pf);
2157 	ice_clean_mailboxq_subtask(pf);
2158 	ice_clean_sbq_subtask(pf);
2159 	ice_sync_arfs_fltrs(pf);
2160 	ice_flush_fdir_ctx(pf);
2161 
2162 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2163 	ice_service_task_complete(pf);
2164 
2165 	/* If the tasks have taken longer than one service timer period
2166 	 * or there is more work to be done, reset the service timer to
2167 	 * schedule the service task now.
2168 	 */
2169 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2170 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2171 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2172 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2173 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2174 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2175 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2176 		mod_timer(&pf->serv_tmr, jiffies);
2177 }
2178 
2179 /**
2180  * ice_set_ctrlq_len - helper function to set controlq length
2181  * @hw: pointer to the HW instance
2182  */
2183 static void ice_set_ctrlq_len(struct ice_hw *hw)
2184 {
2185 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2186 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2187 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2188 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
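	/* size the mailbox receive queue to the maximum the ARQLEN register
	 * field can hold
	 */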
2189 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2190 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2191 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2192 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2193 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2194 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2195 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2196 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2197 }
2198 
2199 /**
2200  * ice_schedule_reset - schedule a reset
2201  * @pf: board private structure
2202  * @reset: reset being requested
2203  */
2204 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2205 {
2206 	struct device *dev = ice_pf_to_dev(pf);
2207 
2208 	/* bail out if earlier reset has failed */
2209 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2210 		dev_dbg(dev, "earlier reset has failed\n");
2211 		return -EIO;
2212 	}
2213 	/* bail if reset/recovery already in progress */
2214 	if (ice_is_reset_in_progress(pf->state)) {
2215 		dev_dbg(dev, "Reset already in progress\n");
2216 		return -EBUSY;
2217 	}
2218 
2219 	ice_unplug_aux_dev(pf);
2220 
2221 	switch (reset) {
2222 	case ICE_RESET_PFR:
2223 		set_bit(ICE_PFR_REQ, pf->state);
2224 		break;
2225 	case ICE_RESET_CORER:
2226 		set_bit(ICE_CORER_REQ, pf->state);
2227 		break;
2228 	case ICE_RESET_GLOBR:
2229 		set_bit(ICE_GLOBR_REQ, pf->state);
2230 		break;
2231 	default:
2232 		return -EINVAL;
2233 	}
2234 
2235 	ice_service_task_schedule(pf);
2236 	return 0;
2237 }
2238 
2239 /**
2240  * ice_irq_affinity_notify - Callback for affinity changes
2241  * @notify: context as to what irq was changed
2242  * @mask: the new affinity mask
2243  *
2244  * This is a callback function used by the irq_set_affinity_notifier function
2245  * so that we may register to receive changes to the irq affinity masks.
2246  */
2247 static void
2248 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2249 			const cpumask_t *mask)
2250 {
2251 	struct ice_q_vector *q_vector =
2252 		container_of(notify, struct ice_q_vector, affinity_notify);
2253 
2254 	cpumask_copy(&q_vector->affinity_mask, mask);
2255 }
2256 
2257 /**
2258  * ice_irq_affinity_release - Callback for affinity notifier release
2259  * @ref: internal core kernel usage
2260  *
2261  * This is a callback function used by the irq_set_affinity_notifier function
2262  * to inform the current notification subscriber that they will no longer
2263  * receive notifications.
2264  */
2265 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2266 
2267 /**
2268  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2269  * @vsi: the VSI being configured
2270  */
2271 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2272 {
2273 	struct ice_hw *hw = &vsi->back->hw;
2274 	int i;
2275 
2276 	ice_for_each_q_vector(vsi, i)
2277 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2278 
2279 	ice_flush(hw);
2280 	return 0;
2281 }
2282 
2283 /**
2284  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2285  * @vsi: the VSI being configured
2286  * @basename: name for the vector
2287  */
2288 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2289 {
2290 	int q_vectors = vsi->num_q_vectors;
2291 	struct ice_pf *pf = vsi->back;
2292 	int base = vsi->base_vector;
2293 	struct device *dev;
2294 	int rx_int_idx = 0;
2295 	int tx_int_idx = 0;
2296 	int vector, err;
2297 	int irq_num;
2298 
2299 	dev = ice_pf_to_dev(pf);
2300 	for (vector = 0; vector < q_vectors; vector++) {
2301 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2302 
2303 		irq_num = pf->msix_entries[base + vector].vector;
2304 
2305 		if (q_vector->tx.ring && q_vector->rx.ring) {
2306 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2307 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2308 			tx_int_idx++;
2309 		} else if (q_vector->rx.ring) {
2310 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2311 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2312 		} else if (q_vector->tx.ring) {
2313 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2314 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2315 		} else {
2316 			/* skip this unused q_vector */
2317 			continue;
2318 		}
2319 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2320 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2321 					       IRQF_SHARED, q_vector->name,
2322 					       q_vector);
2323 		else
2324 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2325 					       0, q_vector->name, q_vector);
2326 		if (err) {
2327 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2328 				   err);
2329 			goto free_q_irqs;
2330 		}
2331 
2332 		/* register for affinity change notifications */
2333 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2334 			struct irq_affinity_notify *affinity_notify;
2335 
2336 			affinity_notify = &q_vector->affinity_notify;
2337 			affinity_notify->notify = ice_irq_affinity_notify;
2338 			affinity_notify->release = ice_irq_affinity_release;
2339 			irq_set_affinity_notifier(irq_num, affinity_notify);
2340 		}
2341 
2342 		/* assign the mask for this irq */
2343 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2344 	}
2345 
2346 	vsi->irqs_ready = true;
2347 	return 0;
2348 
2349 free_q_irqs:
2350 	while (vector) {
2351 		vector--;
2352 		irq_num = pf->msix_entries[base + vector].vector;
2353 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2354 			irq_set_affinity_notifier(irq_num, NULL);
2355 		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2357 	}
2358 	return err;
2359 }
2360 
2361 /**
2362  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2363  * @vsi: VSI to setup Tx rings used by XDP
2364  *
2365  * Return 0 on success and negative value on error
2366  */
2367 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2368 {
2369 	struct device *dev = ice_pf_to_dev(vsi->back);
2370 	int i;
2371 
2372 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2373 		u16 xdp_q_idx = vsi->alloc_txq + i;
2374 		struct ice_ring *xdp_ring;
2375 
2376 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2377 
2378 		if (!xdp_ring)
2379 			goto free_xdp_rings;
2380 
2381 		xdp_ring->q_index = xdp_q_idx;
2382 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2383 		xdp_ring->ring_active = false;
2384 		xdp_ring->vsi = vsi;
2385 		xdp_ring->netdev = NULL;
2386 		xdp_ring->dev = dev;
2387 		xdp_ring->count = vsi->num_tx_desc;
2388 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2389 		if (ice_setup_tx_ring(xdp_ring))
2390 			goto free_xdp_rings;
2391 		ice_set_ring_xdp(xdp_ring);
2392 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2393 	}
2394 
2395 	return 0;
2396 
2397 free_xdp_rings:
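	/* unwind: free every XDP ring set up so far */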
2398 	for (; i >= 0; i--)
2399 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2400 			ice_free_tx_ring(vsi->xdp_rings[i]);
2401 	return -ENOMEM;
2402 }
2403 
2404 /**
2405  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2406  * @vsi: VSI to set the bpf prog on
2407  * @prog: the bpf prog pointer
2408  */
2409 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2410 {
2411 	struct bpf_prog *old_prog;
2412 	int i;
2413 
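	/* swap in the new program and release the reference on the old one */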
2414 	old_prog = xchg(&vsi->xdp_prog, prog);
2415 	if (old_prog)
2416 		bpf_prog_put(old_prog);
2417 
2418 	ice_for_each_rxq(vsi, i)
2419 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2420 }
2421 
2422 /**
2423  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2424  * @vsi: VSI to bring up Tx rings used by XDP
2425  * @prog: bpf program that will be assigned to VSI
2426  *
2427  * Return 0 on success and negative value on error
2428  */
2429 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2430 {
2431 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2432 	int xdp_rings_rem = vsi->num_xdp_txq;
2433 	struct ice_pf *pf = vsi->back;
2434 	struct ice_qs_cfg xdp_qs_cfg = {
2435 		.qs_mutex = &pf->avail_q_mutex,
2436 		.pf_map = pf->avail_txqs,
2437 		.pf_map_size = pf->max_pf_txqs,
2438 		.q_count = vsi->num_xdp_txq,
2439 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2440 		.vsi_map = vsi->txq_map,
2441 		.vsi_map_offset = vsi->alloc_txq,
2442 		.mapping_mode = ICE_VSI_MAP_CONTIG
2443 	};
2444 	enum ice_status status;
2445 	struct device *dev;
2446 	int i, v_idx;
2447 
2448 	dev = ice_pf_to_dev(pf);
2449 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2450 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2451 	if (!vsi->xdp_rings)
2452 		return -ENOMEM;
2453 
2454 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2455 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2456 		goto err_map_xdp;
2457 
2458 	if (ice_xdp_alloc_setup_rings(vsi))
2459 		goto clear_xdp_rings;
2460 
2461 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2462 	ice_for_each_q_vector(vsi, v_idx) {
2463 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2464 		int xdp_rings_per_v, q_id, q_base;
2465 
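		/* spread the remaining XDP rings as evenly as possible across
		 * the remaining queue vectors
		 */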
2466 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2467 					       vsi->num_q_vectors - v_idx);
2468 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2469 
2470 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2471 			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2472 
2473 			xdp_ring->q_vector = q_vector;
2474 			xdp_ring->next = q_vector->tx.ring;
2475 			q_vector->tx.ring = xdp_ring;
2476 		}
2477 		xdp_rings_rem -= xdp_rings_per_v;
2478 	}
2479 
2480 	/* omit the scheduler update if in reset path; XDP queues will be
2481 	 * taken into account at the end of ice_vsi_rebuild, where
2482 	 * ice_cfg_vsi_lan is being called
2483 	 */
2484 	if (ice_is_reset_in_progress(pf->state))
2485 		return 0;
2486 
2487 	/* tell the Tx scheduler that right now we have
2488 	 * additional queues
2489 	 */
2490 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2491 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2492 
2493 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2494 				 max_txqs);
2495 	if (status) {
2496 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2497 			ice_stat_str(status));
2498 		goto clear_xdp_rings;
2499 	}
2500 	ice_vsi_assign_bpf_prog(vsi, prog);
2501 
2502 	return 0;
2503 clear_xdp_rings:
2504 	for (i = 0; i < vsi->num_xdp_txq; i++)
2505 		if (vsi->xdp_rings[i]) {
2506 			kfree_rcu(vsi->xdp_rings[i], rcu);
2507 			vsi->xdp_rings[i] = NULL;
2508 		}
2509 
2510 err_map_xdp:
2511 	mutex_lock(&pf->avail_q_mutex);
2512 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2513 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2514 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2515 	}
2516 	mutex_unlock(&pf->avail_q_mutex);
2517 
2518 	devm_kfree(dev, vsi->xdp_rings);
2519 	return -ENOMEM;
2520 }
2521 
2522 /**
2523  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2524  * @vsi: VSI to remove XDP rings
2525  *
2526  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2527  * resources
2528  */
2529 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2530 {
2531 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2532 	struct ice_pf *pf = vsi->back;
2533 	int i, v_idx;
2534 
	/* q_vectors are freed in the reset path, so there's no point in
	 * detaching rings. If the rebuild was triggered by something other
	 * than a reset, the reset bits in pf->state won't be set, so
	 * additionally check the first q_vector against NULL.
2539 	 */
2540 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2541 		goto free_qmap;
2542 
2543 	ice_for_each_q_vector(vsi, v_idx) {
2544 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2545 		struct ice_ring *ring;
2546 
2547 		ice_for_each_ring(ring, q_vector->tx)
2548 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2549 				break;
2550 
2551 		/* restore the value of last node prior to XDP setup */
2552 		q_vector->tx.ring = ring;
2553 	}
2554 
2555 free_qmap:
2556 	mutex_lock(&pf->avail_q_mutex);
2557 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2558 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2559 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2560 	}
2561 	mutex_unlock(&pf->avail_q_mutex);
2562 
2563 	for (i = 0; i < vsi->num_xdp_txq; i++)
2564 		if (vsi->xdp_rings[i]) {
2565 			if (vsi->xdp_rings[i]->desc)
2566 				ice_free_tx_ring(vsi->xdp_rings[i]);
2567 			kfree_rcu(vsi->xdp_rings[i], rcu);
2568 			vsi->xdp_rings[i] = NULL;
2569 		}
2570 
2571 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2572 	vsi->xdp_rings = NULL;
2573 
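	/* during a rebuild the Tx scheduler is updated by ice_vsi_rebuild, so
	 * skip the update below
	 */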
2574 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2575 		return 0;
2576 
2577 	ice_vsi_assign_bpf_prog(vsi, NULL);
2578 
2579 	/* notify Tx scheduler that we destroyed XDP queues and bring
2580 	 * back the old number of child nodes
2581 	 */
2582 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2583 		max_txqs[i] = vsi->num_txq;
2584 
2585 	/* change number of XDP Tx queues to 0 */
2586 	vsi->num_xdp_txq = 0;
2587 
2588 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2589 			       max_txqs);
2590 }
2591 
2592 /**
 * ice_vsi_rx_napi_schedule - Schedule NAPI on Rx queues of a VSI
2594  * @vsi: VSI to schedule napi on
2595  */
2596 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2597 {
2598 	int i;
2599 
2600 	ice_for_each_rxq(vsi, i) {
2601 		struct ice_ring *rx_ring = vsi->rx_rings[i];
2602 
2603 		if (rx_ring->xsk_pool)
2604 			napi_schedule(&rx_ring->q_vector->napi);
2605 	}
2606 }
2607 
2608 /**
2609  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2610  * @vsi: VSI to setup XDP for
2611  * @prog: XDP program
2612  * @extack: netlink extended ack
2613  */
2614 static int
2615 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2616 		   struct netlink_ext_ack *extack)
2617 {
2618 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2619 	bool if_running = netif_running(vsi->netdev);
2620 	int ret = 0, xdp_ring_err = 0;
2621 
2622 	if (frame_size > vsi->rx_buf_len) {
2623 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2624 		return -EOPNOTSUPP;
2625 	}
2626 
2627 	/* need to stop netdev while setting up the program for Rx rings */
2628 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2629 		ret = ice_down(vsi);
2630 		if (ret) {
2631 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2632 			return ret;
2633 		}
2634 	}
2635 
2636 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
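		/* one XDP Tx ring is allocated for each Rx ring */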
2637 		vsi->num_xdp_txq = vsi->alloc_rxq;
2638 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2639 		if (xdp_ring_err)
2640 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2641 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2642 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2643 		if (xdp_ring_err)
2644 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2645 	} else {
2646 		ice_vsi_assign_bpf_prog(vsi, prog);
2647 	}
2648 
2649 	if (if_running)
2650 		ret = ice_up(vsi);
2651 
2652 	if (!ret && prog)
2653 		ice_vsi_rx_napi_schedule(vsi);
2654 
2655 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2656 }
2657 
2658 /**
2659  * ice_xdp_safe_mode - XDP handler for safe mode
2660  * @dev: netdevice
2661  * @xdp: XDP command
2662  */
2663 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2664 			     struct netdev_bpf *xdp)
2665 {
2666 	NL_SET_ERR_MSG_MOD(xdp->extack,
2667 			   "Please provide working DDP firmware package in order to use XDP\n"
2668 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2669 	return -EOPNOTSUPP;
2670 }
2671 
2672 /**
2673  * ice_xdp - implements XDP handler
2674  * @dev: netdevice
2675  * @xdp: XDP command
2676  */
2677 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2678 {
2679 	struct ice_netdev_priv *np = netdev_priv(dev);
2680 	struct ice_vsi *vsi = np->vsi;
2681 
2682 	if (vsi->type != ICE_VSI_PF) {
2683 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2684 		return -EINVAL;
2685 	}
2686 
2687 	switch (xdp->command) {
2688 	case XDP_SETUP_PROG:
2689 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2690 	case XDP_SETUP_XSK_POOL:
2691 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2692 					  xdp->xsk.queue_id);
2693 	default:
2694 		return -EINVAL;
2695 	}
2696 }
2697 
2698 /**
2699  * ice_ena_misc_vector - enable the non-queue interrupts
2700  * @pf: board private structure
2701  */
2702 static void ice_ena_misc_vector(struct ice_pf *pf)
2703 {
2704 	struct ice_hw *hw = &pf->hw;
2705 	u32 val;
2706 
2707 	/* Disable anti-spoof detection interrupt to prevent spurious event
	 * interrupts during a function reset. Anti-spoof functionality is
2709 	 * still supported.
2710 	 */
2711 	val = rd32(hw, GL_MDCK_TX_TDPU);
2712 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2713 	wr32(hw, GL_MDCK_TX_TDPU, val);
2714 
2715 	/* clear things first */
2716 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2717 	rd32(hw, PFINT_OICR);		/* read to clear */
2718 
2719 	val = (PFINT_OICR_ECC_ERR_M |
2720 	       PFINT_OICR_MAL_DETECT_M |
2721 	       PFINT_OICR_GRST_M |
2722 	       PFINT_OICR_PCI_EXCEPTION_M |
2723 	       PFINT_OICR_VFLR_M |
2724 	       PFINT_OICR_HMC_ERR_M |
2725 	       PFINT_OICR_PE_PUSH_M |
2726 	       PFINT_OICR_PE_CRITERR_M);
2727 
2728 	wr32(hw, PFINT_OICR_ENA, val);
2729 
2730 	/* SW_ITR_IDX = 0, but don't change INTENA */
2731 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2732 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2733 }
2734 
2735 /**
2736  * ice_misc_intr - misc interrupt handler
2737  * @irq: interrupt number
2738  * @data: pointer to a q_vector
2739  */
2740 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2741 {
2742 	struct ice_pf *pf = (struct ice_pf *)data;
2743 	struct ice_hw *hw = &pf->hw;
2744 	irqreturn_t ret = IRQ_NONE;
2745 	struct device *dev;
2746 	u32 oicr, ena_mask;
2747 
2748 	dev = ice_pf_to_dev(pf);
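	/* flag the control queues as having pending events; the service task
	 * subtasks will drain them
	 */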
2749 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2750 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2751 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2752 
2753 	oicr = rd32(hw, PFINT_OICR);
2754 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2755 
2756 	if (oicr & PFINT_OICR_SWINT_M) {
2757 		ena_mask &= ~PFINT_OICR_SWINT_M;
2758 		pf->sw_int_count++;
2759 	}
2760 
2761 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2762 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2763 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2764 	}
2765 	if (oicr & PFINT_OICR_VFLR_M) {
2766 		/* disable any further VFLR event notifications */
2767 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2768 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2769 
2770 			reg &= ~PFINT_OICR_VFLR_M;
2771 			wr32(hw, PFINT_OICR_ENA, reg);
2772 		} else {
2773 			ena_mask &= ~PFINT_OICR_VFLR_M;
2774 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2775 		}
2776 	}
2777 
2778 	if (oicr & PFINT_OICR_GRST_M) {
2779 		u32 reset;
2780 
2781 		/* we have a reset warning */
2782 		ena_mask &= ~PFINT_OICR_GRST_M;
2783 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2784 			GLGEN_RSTAT_RESET_TYPE_S;
2785 
2786 		if (reset == ICE_RESET_CORER)
2787 			pf->corer_count++;
2788 		else if (reset == ICE_RESET_GLOBR)
2789 			pf->globr_count++;
2790 		else if (reset == ICE_RESET_EMPR)
2791 			pf->empr_count++;
2792 		else
2793 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2794 
2795 		/* If a reset cycle isn't already in progress, we set a bit in
2796 		 * pf->state so that the service task can start a reset/rebuild.
2797 		 */
2798 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2799 			if (reset == ICE_RESET_CORER)
2800 				set_bit(ICE_CORER_RECV, pf->state);
2801 			else if (reset == ICE_RESET_GLOBR)
2802 				set_bit(ICE_GLOBR_RECV, pf->state);
2803 			else
2804 				set_bit(ICE_EMPR_RECV, pf->state);
2805 
			/* There are a couple of different bits at play here.
2807 			 * hw->reset_ongoing indicates whether the hardware is
2808 			 * in reset. This is set to true when a reset interrupt
2809 			 * is received and set back to false after the driver
2810 			 * has determined that the hardware is out of reset.
2811 			 *
2812 			 * ICE_RESET_OICR_RECV in pf->state indicates
2813 			 * that a post reset rebuild is required before the
2814 			 * driver is operational again. This is set above.
2815 			 *
2816 			 * As this is the start of the reset/rebuild cycle, set
2817 			 * both to indicate that.
2818 			 */
2819 			hw->reset_ongoing = true;
2820 		}
2821 	}
2822 
2823 	if (oicr & PFINT_OICR_TSYN_TX_M) {
2824 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
2825 		ice_ptp_process_ts(pf);
2826 	}
2827 
2828 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
2829 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2830 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
2831 
		/* Save events from the GLTSYN_STAT register */
2833 		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
2834 						     GLTSYN_STAT_EVENT1_M |
2835 						     GLTSYN_STAT_EVENT2_M);
2836 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
2837 		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
2838 	}
2839 
2840 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
2841 	if (oicr & ICE_AUX_CRIT_ERR) {
2842 		struct iidc_event *event;
2843 
2844 		ena_mask &= ~ICE_AUX_CRIT_ERR;
2845 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2846 		if (event) {
2847 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2848 			/* report the entire OICR value to AUX driver */
2849 			event->reg = oicr;
2850 			ice_send_event_to_aux(pf, event);
2851 			kfree(event);
2852 		}
2853 	}
2854 
2855 	/* Report any remaining unexpected interrupts */
2856 	oicr &= ena_mask;
2857 	if (oicr) {
2858 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2859 		/* If a critical error is pending there is no choice but to
2860 		 * reset the device.
2861 		 */
2862 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2863 			    PFINT_OICR_ECC_ERR_M)) {
2864 			set_bit(ICE_PFR_REQ, pf->state);
2865 			ice_service_task_schedule(pf);
2866 		}
2867 	}
2868 	ret = IRQ_HANDLED;
2869 
2870 	ice_service_task_schedule(pf);
2871 	ice_irq_dynamic_ena(hw, NULL, NULL);
2872 
2873 	return ret;
2874 }
2875 
2876 /**
2877  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2878  * @hw: pointer to HW structure
2879  */
2880 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2881 {
2882 	/* disable Admin queue Interrupt causes */
2883 	wr32(hw, PFINT_FW_CTL,
2884 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2885 
2886 	/* disable Mailbox queue Interrupt causes */
2887 	wr32(hw, PFINT_MBX_CTL,
2888 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2889 
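	/* disable Sideband queue Interrupt causes */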
2890 	wr32(hw, PFINT_SB_CTL,
2891 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
2892 
	/* disable Other Interrupt Cause (OICR) interrupt causes */
2894 	wr32(hw, PFINT_OICR_CTL,
2895 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2896 
2897 	ice_flush(hw);
2898 }
2899 
2900 /**
2901  * ice_free_irq_msix_misc - Unroll misc vector setup
2902  * @pf: board private structure
2903  */
2904 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2905 {
2906 	struct ice_hw *hw = &pf->hw;
2907 
2908 	ice_dis_ctrlq_interrupts(hw);
2909 
2910 	/* disable OICR interrupt */
2911 	wr32(hw, PFINT_OICR_ENA, 0);
2912 	ice_flush(hw);
2913 
2914 	if (pf->msix_entries) {
2915 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2916 		devm_free_irq(ice_pf_to_dev(pf),
2917 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2918 	}
2919 
2920 	pf->num_avail_sw_msix += 1;
2921 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2922 }
2923 
2924 /**
2925  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2926  * @hw: pointer to HW structure
2927  * @reg_idx: HW vector index to associate the control queue interrupts with
2928  */
2929 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2930 {
2931 	u32 val;
2932 
2933 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2934 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2935 	wr32(hw, PFINT_OICR_CTL, val);
2936 
2937 	/* enable Admin queue Interrupt causes */
2938 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2939 	       PFINT_FW_CTL_CAUSE_ENA_M);
2940 	wr32(hw, PFINT_FW_CTL, val);
2941 
2942 	/* enable Mailbox queue Interrupt causes */
2943 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2944 	       PFINT_MBX_CTL_CAUSE_ENA_M);
2945 	wr32(hw, PFINT_MBX_CTL, val);
2946 
	/* enable Sideband queue Interrupt causes */
2948 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
2949 	       PFINT_SB_CTL_CAUSE_ENA_M);
2950 	wr32(hw, PFINT_SB_CTL, val);
2951 
2952 	ice_flush(hw);
2953 }
2954 
2955 /**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non-queue events
2957  * @pf: board private structure
2958  *
2959  * This sets up the handler for MSIX 0, which is used to manage the
2960  * non-queue interrupts, e.g. AdminQ and errors. This is not used
2961  * when in MSI or Legacy interrupt mode.
2962  */
2963 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2964 {
2965 	struct device *dev = ice_pf_to_dev(pf);
2966 	struct ice_hw *hw = &pf->hw;
2967 	int oicr_idx, err = 0;
2968 
2969 	if (!pf->int_name[0])
2970 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2971 			 dev_driver_string(dev), dev_name(dev));
2972 
2973 	/* Do not request IRQ but do enable OICR interrupt since settings are
2974 	 * lost during reset. Note that this function is called only during
2975 	 * rebuild path and not while reset is in progress.
2976 	 */
2977 	if (ice_is_reset_in_progress(pf->state))
2978 		goto skip_req_irq;
2979 
2980 	/* reserve one vector in irq_tracker for misc interrupts */
2981 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2982 	if (oicr_idx < 0)
2983 		return oicr_idx;
2984 
2985 	pf->num_avail_sw_msix -= 1;
2986 	pf->oicr_idx = (u16)oicr_idx;
2987 
2988 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2989 			       ice_misc_intr, 0, pf->int_name, pf);
2990 	if (err) {
2991 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2992 			pf->int_name, err);
2993 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2994 		pf->num_avail_sw_msix += 1;
2995 		return err;
2996 	}
2997 
2998 skip_req_irq:
2999 	ice_ena_misc_vector(pf);
3000 
3001 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3002 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3003 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3004 
3005 	ice_flush(hw);
3006 	ice_irq_dynamic_ena(hw, NULL, NULL);
3007 
3008 	return 0;
3009 }
3010 
3011 /**
3012  * ice_napi_add - register NAPI handler for the VSI
3013  * @vsi: VSI for which NAPI handler is to be registered
3014  *
3015  * This function is only called in the driver's load path. Registering the NAPI
3016  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3017  * reset/rebuild, etc.)
3018  */
3019 static void ice_napi_add(struct ice_vsi *vsi)
3020 {
3021 	int v_idx;
3022 
3023 	if (!vsi->netdev)
3024 		return;
3025 
3026 	ice_for_each_q_vector(vsi, v_idx)
3027 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3028 			       ice_napi_poll, NAPI_POLL_WEIGHT);
3029 }
3030 
3031 /**
 * ice_set_ops - set netdev and ethtool ops for the given netdev
3033  * @netdev: netdev instance
3034  */
3035 static void ice_set_ops(struct net_device *netdev)
3036 {
3037 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3038 
3039 	if (ice_is_safe_mode(pf)) {
3040 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3041 		ice_set_ethtool_safe_mode_ops(netdev);
3042 		return;
3043 	}
3044 
3045 	netdev->netdev_ops = &ice_netdev_ops;
3046 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3047 	ice_set_ethtool_ops(netdev);
3048 }
3049 
3050 /**
3051  * ice_set_netdev_features - set features for the given netdev
3052  * @netdev: netdev instance
3053  */
3054 static void ice_set_netdev_features(struct net_device *netdev)
3055 {
3056 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3057 	netdev_features_t csumo_features;
3058 	netdev_features_t vlano_features;
3059 	netdev_features_t dflt_features;
3060 	netdev_features_t tso_features;
3061 
3062 	if (ice_is_safe_mode(pf)) {
3063 		/* safe mode */
3064 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3065 		netdev->hw_features = netdev->features;
3066 		return;
3067 	}
3068 
3069 	dflt_features = NETIF_F_SG	|
3070 			NETIF_F_HIGHDMA	|
3071 			NETIF_F_NTUPLE	|
3072 			NETIF_F_RXHASH;
3073 
3074 	csumo_features = NETIF_F_RXCSUM	  |
3075 			 NETIF_F_IP_CSUM  |
3076 			 NETIF_F_SCTP_CRC |
3077 			 NETIF_F_IPV6_CSUM;
3078 
3079 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3080 			 NETIF_F_HW_VLAN_CTAG_TX     |
3081 			 NETIF_F_HW_VLAN_CTAG_RX;
3082 
3083 	tso_features = NETIF_F_TSO			|
3084 		       NETIF_F_TSO_ECN			|
3085 		       NETIF_F_TSO6			|
3086 		       NETIF_F_GSO_GRE			|
3087 		       NETIF_F_GSO_UDP_TUNNEL		|
3088 		       NETIF_F_GSO_GRE_CSUM		|
3089 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3090 		       NETIF_F_GSO_PARTIAL		|
3091 		       NETIF_F_GSO_IPXIP4		|
3092 		       NETIF_F_GSO_IPXIP6		|
3093 		       NETIF_F_GSO_UDP_L4;
3094 
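	/* tunnel checksum GSO features are advertised via GSO partial */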
3095 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3096 					NETIF_F_GSO_GRE_CSUM;
3097 	/* set features that user can change */
3098 	netdev->hw_features = dflt_features | csumo_features |
3099 			      vlano_features | tso_features;
3100 
3101 	/* add support for HW_CSUM on packets with MPLS header */
3102 	netdev->mpls_features =  NETIF_F_HW_CSUM;
3103 
3104 	/* enable features */
3105 	netdev->features |= netdev->hw_features;
3106 	/* encap and VLAN devices inherit default, csumo and tso features */
3107 	netdev->hw_enc_features |= dflt_features | csumo_features |
3108 				   tso_features;
3109 	netdev->vlan_features |= dflt_features | csumo_features |
3110 				 tso_features;
3111 }
3112 
3113 /**
3114  * ice_cfg_netdev - Allocate, configure and register a netdev
3115  * @vsi: the VSI associated with the new netdev
3116  *
3117  * Returns 0 on success, negative value on failure
3118  */
3119 static int ice_cfg_netdev(struct ice_vsi *vsi)
3120 {
3121 	struct ice_netdev_priv *np;
3122 	struct net_device *netdev;
3123 	u8 mac_addr[ETH_ALEN];
3124 
3125 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3126 				    vsi->alloc_rxq);
3127 	if (!netdev)
3128 		return -ENOMEM;
3129 
3130 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3131 	vsi->netdev = netdev;
3132 	np = netdev_priv(netdev);
3133 	np->vsi = vsi;
3134 
3135 	ice_set_netdev_features(netdev);
3136 
3137 	ice_set_ops(netdev);
3138 
3139 	if (vsi->type == ICE_VSI_PF) {
3140 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3141 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3142 		ether_addr_copy(netdev->dev_addr, mac_addr);
3143 		ether_addr_copy(netdev->perm_addr, mac_addr);
3144 	}
3145 
3146 	netdev->priv_flags |= IFF_UNICAST_FLT;
3147 
3148 	/* Setup netdev TC information */
3149 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3150 
	/* set the watchdog timeout value to 5 seconds */
3152 	netdev->watchdog_timeo = 5 * HZ;
3153 
3154 	netdev->min_mtu = ETH_MIN_MTU;
3155 	netdev->max_mtu = ICE_MAX_MTU;
3156 
3157 	return 0;
3158 }
3159 
3160 /**
3161  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3162  * @lut: Lookup table
3163  * @rss_table_size: Lookup table size
3164  * @rss_size: Range of queue number for hashing
3165  */
3166 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3167 {
3168 	u16 i;
3169 
3170 	for (i = 0; i < rss_table_size; i++)
3171 		lut[i] = i % rss_size;
3172 }
3173 
3174 /**
3175  * ice_pf_vsi_setup - Set up a PF VSI
3176  * @pf: board private structure
3177  * @pi: pointer to the port_info instance
3178  *
3179  * Returns pointer to the successfully allocated VSI software struct
3180  * on success, otherwise returns NULL on failure.
3181  */
3182 static struct ice_vsi *
3183 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3184 {
3185 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3186 }
3187 
3188 /**
3189  * ice_ctrl_vsi_setup - Set up a control VSI
3190  * @pf: board private structure
3191  * @pi: pointer to the port_info instance
3192  *
3193  * Returns pointer to the successfully allocated VSI software struct
3194  * on success, otherwise returns NULL on failure.
3195  */
3196 static struct ice_vsi *
3197 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3198 {
3199 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3200 }
3201 
3202 /**
3203  * ice_lb_vsi_setup - Set up a loopback VSI
3204  * @pf: board private structure
3205  * @pi: pointer to the port_info instance
3206  *
3207  * Returns pointer to the successfully allocated VSI software struct
3208  * on success, otherwise returns NULL on failure.
3209  */
3210 struct ice_vsi *
3211 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3212 {
3213 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3214 }
3215 
3216 /**
3217  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3218  * @netdev: network interface to be adjusted
3219  * @proto: unused protocol
3220  * @vid: VLAN ID to be added
3221  *
3222  * net_device_ops implementation for adding VLAN IDs
3223  */
3224 static int
3225 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3226 		    u16 vid)
3227 {
3228 	struct ice_netdev_priv *np = netdev_priv(netdev);
3229 	struct ice_vsi *vsi = np->vsi;
3230 	int ret;
3231 
3232 	/* VLAN 0 is added by default during load/reset */
3233 	if (!vid)
3234 		return 0;
3235 
3236 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3237 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3238 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3239 		if (ret)
3240 			return ret;
3241 	}
3242 
3243 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3244 	 * packets aren't pruned by the device's internal switch on Rx
3245 	 */
3246 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3247 	if (!ret)
3248 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3249 
3250 	return ret;
3251 }
3252 
3253 /**
3254  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3255  * @netdev: network interface to be adjusted
3256  * @proto: unused protocol
3257  * @vid: VLAN ID to be removed
3258  *
3259  * net_device_ops implementation for removing VLAN IDs
3260  */
3261 static int
3262 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3263 		     u16 vid)
3264 {
3265 	struct ice_netdev_priv *np = netdev_priv(netdev);
3266 	struct ice_vsi *vsi = np->vsi;
3267 	int ret;
3268 
3269 	/* don't allow removal of VLAN 0 */
3270 	if (!vid)
3271 		return 0;
3272 
3273 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3274 	 * information
3275 	 */
3276 	ret = ice_vsi_kill_vlan(vsi, vid);
3277 	if (ret)
3278 		return ret;
3279 
3280 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3281 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3282 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3283 
3284 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3285 	return ret;
3286 }
3287 
3288 /**
3289  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3290  * @pf: board private structure
3291  *
3292  * Returns 0 on success, negative value on failure
3293  */
3294 static int ice_setup_pf_sw(struct ice_pf *pf)
3295 {
3296 	struct ice_vsi *vsi;
3297 	int status = 0;
3298 
3299 	if (ice_is_reset_in_progress(pf->state))
3300 		return -EBUSY;
3301 
3302 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3303 	if (!vsi)
3304 		return -ENOMEM;
3305 
3306 	status = ice_cfg_netdev(vsi);
3307 	if (status) {
3308 		status = -ENODEV;
3309 		goto unroll_vsi_setup;
3310 	}
3311 	/* netdev has to be configured before setting frame size */
3312 	ice_vsi_cfg_frame_size(vsi);
3313 
3314 	/* Setup DCB netlink interface */
3315 	ice_dcbnl_setup(vsi);
3316 
3317 	/* registering the NAPI handler requires both the queues and
3318 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3319 	 * and ice_cfg_netdev() respectively
3320 	 */
3321 	ice_napi_add(vsi);
3322 
3323 	status = ice_set_cpu_rx_rmap(vsi);
3324 	if (status) {
3325 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3326 			vsi->vsi_num, status);
3327 		status = -EINVAL;
3328 		goto unroll_napi_add;
3329 	}
3330 	status = ice_init_mac_fltr(pf);
3331 	if (status)
3332 		goto free_cpu_rx_map;
3333 
3334 	return status;
3335 
3336 free_cpu_rx_map:
3337 	ice_free_cpu_rx_rmap(vsi);
3338 
3339 unroll_napi_add:
3340 	if (vsi) {
3341 		ice_napi_del(vsi);
3342 		if (vsi->netdev) {
3343 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3344 			free_netdev(vsi->netdev);
3345 			vsi->netdev = NULL;
3346 		}
3347 	}
3348 
3349 unroll_vsi_setup:
3350 	ice_vsi_release(vsi);
3351 	return status;
3352 }
3353 
3354 /**
 * ice_get_avail_q_count - Get count of available queues
 * @pf_qmap: bitmap to count available (clear) queue bits from
3357  * @lock: pointer to a mutex that protects access to pf_qmap
3358  * @size: size of the bitmap
3359  */
3360 static u16
3361 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3362 {
3363 	unsigned long bit;
3364 	u16 count = 0;
3365 
3366 	mutex_lock(lock);
3367 	for_each_clear_bit(bit, pf_qmap, size)
3368 		count++;
3369 	mutex_unlock(lock);
3370 
3371 	return count;
3372 }
3373 
3374 /**
 * ice_get_avail_txq_count - Get count of available Tx queues
3376  * @pf: pointer to an ice_pf instance
3377  */
3378 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3379 {
3380 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3381 				     pf->max_pf_txqs);
3382 }
3383 
3384 /**
 * ice_get_avail_rxq_count - Get count of available Rx queues
3386  * @pf: pointer to an ice_pf instance
3387  */
3388 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3389 {
3390 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3391 				     pf->max_pf_rxqs);
3392 }
3393 
3394 /**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3396  * @pf: board private structure to initialize
3397  */
3398 static void ice_deinit_pf(struct ice_pf *pf)
3399 {
3400 	ice_service_task_stop(pf);
3401 	mutex_destroy(&pf->sw_mutex);
3402 	mutex_destroy(&pf->tc_mutex);
3403 	mutex_destroy(&pf->avail_q_mutex);
3404 
3405 	if (pf->avail_txqs) {
3406 		bitmap_free(pf->avail_txqs);
3407 		pf->avail_txqs = NULL;
3408 	}
3409 
3410 	if (pf->avail_rxqs) {
3411 		bitmap_free(pf->avail_rxqs);
3412 		pf->avail_rxqs = NULL;
3413 	}
3414 
3415 	if (pf->ptp.clock)
3416 		ptp_clock_unregister(pf->ptp.clock);
3417 }
3418 
3419 /**
 * ice_set_pf_caps - set PF's capability flags
3421  * @pf: pointer to the PF instance
3422  */
3423 static void ice_set_pf_caps(struct ice_pf *pf)
3424 {
3425 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3426 
3427 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3428 	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3429 	if (func_caps->common_cap.rdma) {
3430 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3431 		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3432 	}
3433 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3434 	if (func_caps->common_cap.dcb)
3435 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3436 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3437 	if (func_caps->common_cap.sr_iov_1_1) {
3438 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3439 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3440 					      ICE_MAX_VF_COUNT);
3441 	}
3442 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3443 	if (func_caps->common_cap.rss_table_size)
3444 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3445 
3446 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3447 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3448 		u16 unused;
3449 
3450 		/* ctrl_vsi_idx will be set to a valid value when flow director
3451 		 * is setup by ice_init_fdir
3452 		 */
3453 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3454 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3455 		/* force guaranteed filter pool for PF */
3456 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3457 				       func_caps->fd_fltr_guar);
3458 		/* force shared filter pool for PF */
3459 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3460 				       func_caps->fd_fltr_best_effort);
3461 	}
3462 
3463 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3464 	if (func_caps->common_cap.ieee_1588)
3465 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3466 
3467 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3468 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3469 }
3470 
3471 /**
3472  * ice_init_pf - Initialize general software structures (struct ice_pf)
3473  * @pf: board private structure to initialize
3474  */
3475 static int ice_init_pf(struct ice_pf *pf)
3476 {
3477 	ice_set_pf_caps(pf);
3478 
3479 	mutex_init(&pf->sw_mutex);
3480 	mutex_init(&pf->tc_mutex);
3481 
3482 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3483 	spin_lock_init(&pf->aq_wait_lock);
3484 	init_waitqueue_head(&pf->aq_wait_queue);
3485 
3486 	init_waitqueue_head(&pf->reset_wait_queue);
3487 
3488 	/* setup service timer and periodic service task */
3489 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3490 	pf->serv_tmr_period = HZ;
3491 	INIT_WORK(&pf->serv_task, ice_service_task);
3492 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3493 
3494 	mutex_init(&pf->avail_q_mutex);
3495 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3496 	if (!pf->avail_txqs)
3497 		return -ENOMEM;
3498 
3499 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3500 	if (!pf->avail_rxqs) {
		bitmap_free(pf->avail_txqs);
3502 		pf->avail_txqs = NULL;
3503 		return -ENOMEM;
3504 	}
3505 
3506 	return 0;
3507 }
3508 
3509 /**
3510  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3511  * @pf: board private structure
3512  *
 * Compute the number of MSI-X vectors required (v_budget) and request them
 * from the OS. Return the number of vectors reserved or negative on failure.
3515  */
3516 static int ice_ena_msix_range(struct ice_pf *pf)
3517 {
3518 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3519 	struct device *dev = ice_pf_to_dev(pf);
3520 	int needed, err, i;
3521 
3522 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3523 	num_cpus = num_online_cpus();
3524 
3525 	/* reserve for LAN miscellaneous handler */
3526 	needed = ICE_MIN_LAN_OICR_MSIX;
3527 	if (v_left < needed)
3528 		goto no_hw_vecs_left_err;
3529 	v_budget += needed;
3530 	v_left -= needed;
3531 
3532 	/* reserve for flow director */
3533 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3534 		needed = ICE_FDIR_MSIX;
3535 		if (v_left < needed)
3536 			goto no_hw_vecs_left_err;
3537 		v_budget += needed;
3538 		v_left -= needed;
3539 	}
3540 
3541 	/* total used for non-traffic vectors */
3542 	v_other = v_budget;
3543 
3544 	/* reserve vectors for LAN traffic */
3545 	needed = num_cpus;
3546 	if (v_left < needed)
3547 		goto no_hw_vecs_left_err;
3548 	pf->num_lan_msix = needed;
3549 	v_budget += needed;
3550 	v_left -= needed;
3551 
3552 	/* reserve vectors for RDMA auxiliary driver */
3553 	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3554 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3555 		if (v_left < needed)
3556 			goto no_hw_vecs_left_err;
3557 		pf->num_rdma_msix = needed;
3558 		v_budget += needed;
3559 		v_left -= needed;
3560 	}
3561 
3562 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3563 					sizeof(*pf->msix_entries), GFP_KERNEL);
3564 	if (!pf->msix_entries) {
3565 		err = -ENOMEM;
3566 		goto exit_err;
3567 	}
3568 
3569 	for (i = 0; i < v_budget; i++)
3570 		pf->msix_entries[i].entry = i;
3571 
3572 	/* actually reserve the vectors */
3573 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3574 					 ICE_MIN_MSIX, v_budget);
3575 	if (v_actual < 0) {
3576 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3577 		err = v_actual;
3578 		goto msix_err;
3579 	}
3580 
3581 	if (v_actual < v_budget) {
3582 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3583 			 v_budget, v_actual);
3584 
3585 		if (v_actual < ICE_MIN_MSIX) {
3586 			/* error if we can't get minimum vectors */
3587 			pci_disable_msix(pf->pdev);
3588 			err = -ERANGE;
3589 			goto msix_err;
3590 		} else {
3591 			int v_remain = v_actual - v_other;
3592 			int v_rdma = 0, v_min_rdma = 0;
3593 
3594 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3595 				/* Need at least 1 interrupt in addition to
3596 				 * AEQ MSIX
3597 				 */
3598 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3599 				v_min_rdma = ICE_MIN_RDMA_MSIX;
3600 			}
3601 
3602 			if (v_actual == ICE_MIN_MSIX ||
3603 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3604 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3605 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3606 
3607 				pf->num_rdma_msix = 0;
3608 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3609 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3610 				   (v_remain - v_rdma < v_rdma)) {
3611 				/* Support minimum RDMA and give remaining
3612 				 * vectors to LAN MSIX
3613 				 */
3614 				pf->num_rdma_msix = v_min_rdma;
3615 				pf->num_lan_msix = v_remain - v_min_rdma;
3616 			} else {
3617 				/* Split remaining MSIX with RDMA after
3618 				 * accounting for AEQ MSIX
3619 				 */
3620 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3621 						    ICE_RDMA_NUM_AEQ_MSIX;
3622 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3623 			}
3624 
3625 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3626 				   pf->num_lan_msix);
3627 
3628 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3629 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3630 					   pf->num_rdma_msix);
3631 		}
3632 	}
3633 
3634 	return v_actual;
3635 
3636 msix_err:
3637 	devm_kfree(dev, pf->msix_entries);
3638 	goto exit_err;
3639 
3640 no_hw_vecs_left_err:
3641 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3642 		needed, v_left);
3643 	err = -ERANGE;
3644 exit_err:
3645 	pf->num_rdma_msix = 0;
3646 	pf->num_lan_msix = 0;
3647 	return err;
3648 }
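
/* Worked example of the budget computed above, assuming 8 online CPUs with
 * flow director and RDMA both enabled (the ICE_*_MSIX constants are defined
 * in the driver headers and may vary between versions):
 *
 *	v_budget = ICE_MIN_LAN_OICR_MSIX	(misc/OICR handler)
 *		 + ICE_FDIR_MSIX		(flow director)
 *		 + 8				(LAN, one per CPU)
 *		 + 8 + ICE_RDMA_NUM_AEQ_MSIX	(RDMA per-CPU + AEQ)
 *
 * If pci_enable_msix_range() grants at least ICE_MIN_MSIX but fewer than
 * v_budget vectors, the remainder after the non-traffic vectors (v_other)
 * is redistributed between LAN and RDMA as in the function body above.
 */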
3649 
3650 /**
3651  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3652  * @pf: board private structure
3653  */
3654 static void ice_dis_msix(struct ice_pf *pf)
3655 {
3656 	pci_disable_msix(pf->pdev);
3657 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3658 	pf->msix_entries = NULL;
3659 }
3660 
3661 /**
3662  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3663  * @pf: board private structure
3664  */
3665 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3666 {
3667 	ice_dis_msix(pf);
3668 
3669 	if (pf->irq_tracker) {
3670 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3671 		pf->irq_tracker = NULL;
3672 	}
3673 }
3674 
3675 /**
3676  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3677  * @pf: board private structure to initialize
3678  */
3679 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3680 {
3681 	int vectors;
3682 
3683 	vectors = ice_ena_msix_range(pf);
3684 
3685 	if (vectors < 0)
3686 		return vectors;
3687 
3688 	/* set up vector assignment tracking */
3689 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3690 				       struct_size(pf->irq_tracker, list, vectors),
3691 				       GFP_KERNEL);
3692 	if (!pf->irq_tracker) {
3693 		ice_dis_msix(pf);
3694 		return -ENOMEM;
3695 	}
3696 
	/* populate the SW interrupt pool with the number of OS-granted IRQs */
3698 	pf->num_avail_sw_msix = (u16)vectors;
3699 	pf->irq_tracker->num_entries = (u16)vectors;
3700 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3701 
3702 	return 0;
3703 }
3704 
3705 /**
3706  * ice_is_wol_supported - check if WoL is supported
3707  * @hw: pointer to hardware info
3708  *
3709  * Check if WoL is supported based on the HW configuration.
3710  * Returns true if NVM supports and enables WoL for this port, false otherwise
3711  */
3712 bool ice_is_wol_supported(struct ice_hw *hw)
3713 {
3714 	u16 wol_ctrl;
3715 
3716 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3717 	 * word) indicates WoL is not supported on the corresponding PF ID.
3718 	 */
3719 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3720 		return false;
3721 
3722 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3723 }
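
/* Example: if the WoL control word reads 0x0005 (bits 0 and 2 set), WoL is
 * not supported on logical ports 0 and 2 but is supported on ports 1 and 3,
 * since a set bit at BIT(lport) means "not supported".
 */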
3724 
3725 /**
3726  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3727  * @vsi: VSI being changed
3728  * @new_rx: new number of Rx queues
3729  * @new_tx: new number of Tx queues
3730  *
 * Only change the number of queues if new_tx or new_rx is non-zero.
3732  *
3733  * Returns 0 on success.
3734  */
3735 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3736 {
3737 	struct ice_pf *pf = vsi->back;
3738 	int err = 0, timeout = 50;
3739 
3740 	if (!new_rx && !new_tx)
3741 		return -EINVAL;
3742 
3743 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3744 		timeout--;
3745 		if (!timeout)
3746 			return -EBUSY;
3747 		usleep_range(1000, 2000);
3748 	}
3749 
3750 	if (new_tx)
3751 		vsi->req_txq = (u16)new_tx;
3752 	if (new_rx)
3753 		vsi->req_rxq = (u16)new_rx;
3754 
3755 	/* set for the next time the netdev is started */
3756 	if (!netif_running(vsi->netdev)) {
3757 		ice_vsi_rebuild(vsi, false);
3758 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3759 		goto done;
3760 	}
3761 
3762 	ice_vsi_close(vsi);
3763 	ice_vsi_rebuild(vsi, false);
3764 	ice_pf_dcb_recfg(pf);
3765 	ice_vsi_open(vsi);
3766 done:
3767 	clear_bit(ICE_CFG_BUSY, pf->state);
3768 	return err;
3769 }
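
/* Judging by the driver's ethtool hooks, this is the backend of
 * "ethtool -L <iface> combined N" (ice_set_channels() in ice_ethtool.c):
 * req_txq/req_rxq are latched here and applied by ice_vsi_rebuild(),
 * either immediately when the netdev is running or on the next
 * ice_vsi_open() when it is down.
 */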
3770 
3771 /**
3772  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3773  * @pf: PF to configure
3774  *
3775  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3776  * VSI can still Tx/Rx VLAN tagged packets.
3777  */
3778 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3779 {
3780 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3781 	struct ice_vsi_ctx *ctxt;
3782 	enum ice_status status;
3783 	struct ice_hw *hw;
3784 
3785 	if (!vsi)
3786 		return;
3787 
3788 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3789 	if (!ctxt)
3790 		return;
3791 
3792 	hw = &pf->hw;
3793 	ctxt->info = vsi->info;
3794 
3795 	ctxt->info.valid_sections =
3796 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3797 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3798 			    ICE_AQ_VSI_PROP_SW_VALID);
3799 
3800 	/* disable VLAN anti-spoof */
3801 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3802 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3803 
3804 	/* disable VLAN pruning and keep all other settings */
3805 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3806 
3807 	/* allow all VLANs on Tx and don't strip on Rx */
3808 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3809 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3810 
3811 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3812 	if (status) {
3813 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3814 			ice_stat_str(status),
3815 			ice_aq_str(hw->adminq.sq_last_status));
3816 	} else {
3817 		vsi->info.sec_flags = ctxt->info.sec_flags;
3818 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3819 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3820 	}
3821 
3822 	kfree(ctxt);
3823 }
3824 
3825 /**
3826  * ice_log_pkg_init - log result of DDP package load
3827  * @hw: pointer to hardware info
3828  * @status: status of package load
3829  */
3830 static void
3831 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3832 {
3833 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3834 	struct device *dev = ice_pf_to_dev(pf);
3835 
3836 	switch (*status) {
3837 	case ICE_SUCCESS:
		/* The package download AdminQ command returned success either
		 * because this download succeeded or because it returned
		 * ICE_ERR_AQ_NO_WORK, meaning there is already a package
		 * loaded on the device.
		 */
3842 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3843 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3844 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3845 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3846 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3847 			    sizeof(hw->pkg_name))) {
3848 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3849 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3850 					 hw->active_pkg_name,
3851 					 hw->active_pkg_ver.major,
3852 					 hw->active_pkg_ver.minor,
3853 					 hw->active_pkg_ver.update,
3854 					 hw->active_pkg_ver.draft);
3855 			else
3856 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3857 					 hw->active_pkg_name,
3858 					 hw->active_pkg_ver.major,
3859 					 hw->active_pkg_ver.minor,
3860 					 hw->active_pkg_ver.update,
3861 					 hw->active_pkg_ver.draft);
3862 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3863 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3864 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3865 				hw->active_pkg_name,
3866 				hw->active_pkg_ver.major,
3867 				hw->active_pkg_ver.minor,
3868 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3869 			*status = ICE_ERR_NOT_SUPPORTED;
3870 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3871 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3872 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3873 				 hw->active_pkg_name,
3874 				 hw->active_pkg_ver.major,
3875 				 hw->active_pkg_ver.minor,
3876 				 hw->active_pkg_ver.update,
3877 				 hw->active_pkg_ver.draft,
3878 				 hw->pkg_name,
3879 				 hw->pkg_ver.major,
3880 				 hw->pkg_ver.minor,
3881 				 hw->pkg_ver.update,
3882 				 hw->pkg_ver.draft);
3883 		} else {
3884 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3885 			*status = ICE_ERR_NOT_SUPPORTED;
3886 		}
3887 		break;
3888 	case ICE_ERR_FW_DDP_MISMATCH:
3889 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3890 		break;
3891 	case ICE_ERR_BUF_TOO_SHORT:
3892 	case ICE_ERR_CFG:
3893 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3894 		break;
3895 	case ICE_ERR_NOT_SUPPORTED:
3896 		/* Package File version not supported */
3897 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3898 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3899 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3900 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3901 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3902 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3903 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3904 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3905 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3906 		break;
3907 	case ICE_ERR_AQ_ERROR:
3908 		switch (hw->pkg_dwnld_status) {
3909 		case ICE_AQ_RC_ENOSEC:
3910 		case ICE_AQ_RC_EBADSIG:
3911 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3912 			return;
3913 		case ICE_AQ_RC_ESVN:
3914 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3915 			return;
3916 		case ICE_AQ_RC_EBADMAN:
3917 		case ICE_AQ_RC_EBADBUF:
3918 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3919 			/* poll for reset to complete */
3920 			if (ice_check_reset(hw))
3921 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3922 			return;
3923 		default:
3924 			break;
3925 		}
3926 		fallthrough;
3927 	default:
3928 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3929 			*status);
3930 		break;
3931 	}
3932 }
3933 
3934 /**
3935  * ice_load_pkg - load/reload the DDP Package file
 * @firmware: firmware structure when a firmware file was requested, or NULL
 *	      for a reload
3937  * @pf: pointer to the PF instance
3938  *
3939  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3940  * initialize HW tables.
3941  */
3942 static void
3943 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3944 {
3945 	enum ice_status status = ICE_ERR_PARAM;
3946 	struct device *dev = ice_pf_to_dev(pf);
3947 	struct ice_hw *hw = &pf->hw;
3948 
3949 	/* Load DDP Package */
3950 	if (firmware && !hw->pkg_copy) {
3951 		status = ice_copy_and_init_pkg(hw, firmware->data,
3952 					       firmware->size);
3953 		ice_log_pkg_init(hw, &status);
3954 	} else if (!firmware && hw->pkg_copy) {
3955 		/* Reload package during rebuild after CORER/GLOBR reset */
3956 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3957 		ice_log_pkg_init(hw, &status);
3958 	} else {
3959 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3960 	}
3961 
3962 	if (status) {
3963 		/* Safe Mode */
3964 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3965 		return;
3966 	}
3967 
	/* A successfully downloaded package is the precondition for advanced
	 * features, hence set the ICE_FLAG_ADV_FEATURES flag
	 */
3971 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3972 }
3973 
3974 /**
3975  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3976  * @pf: pointer to the PF structure
3977  *
3978  * There is no error returned here because the driver should be able to handle
3979  * 128 Byte cache lines, so we only print a warning in case issues are seen,
3980  * specifically with Tx.
3981  */
3982 static void ice_verify_cacheline_size(struct ice_pf *pf)
3983 {
3984 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3985 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3986 			 ICE_CACHE_LINE_BYTES);
3987 }
3988 
3989 /**
3990  * ice_send_version - update firmware with driver version
3991  * @pf: PF struct
3992  *
3993  * Returns ICE_SUCCESS on success, else error code
3994  */
3995 static enum ice_status ice_send_version(struct ice_pf *pf)
3996 {
3997 	struct ice_driver_ver dv;
3998 
3999 	dv.major_ver = 0xff;
4000 	dv.minor_ver = 0xff;
4001 	dv.build_ver = 0xff;
4002 	dv.subbuild_ver = 0;
4003 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4004 		sizeof(dv.driver_string));
4005 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4006 }
4007 
4008 /**
4009  * ice_init_fdir - Initialize flow director VSI and configuration
4010  * @pf: pointer to the PF instance
4011  *
 * Returns 0 on success, negative on error
4013  */
4014 static int ice_init_fdir(struct ice_pf *pf)
4015 {
4016 	struct device *dev = ice_pf_to_dev(pf);
4017 	struct ice_vsi *ctrl_vsi;
4018 	int err;
4019 
4020 	/* Side Band Flow Director needs to have a control VSI.
4021 	 * Allocate it and store it in the PF.
4022 	 */
4023 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4024 	if (!ctrl_vsi) {
4025 		dev_dbg(dev, "could not create control VSI\n");
4026 		return -ENOMEM;
4027 	}
4028 
4029 	err = ice_vsi_open_ctrl(ctrl_vsi);
4030 	if (err) {
4031 		dev_dbg(dev, "could not open control VSI\n");
4032 		goto err_vsi_open;
4033 	}
4034 
4035 	mutex_init(&pf->hw.fdir_fltr_lock);
4036 
4037 	err = ice_fdir_create_dflt_rules(pf);
4038 	if (err)
4039 		goto err_fdir_rule;
4040 
4041 	return 0;
4042 
4043 err_fdir_rule:
4044 	ice_fdir_release_flows(&pf->hw);
4045 	ice_vsi_close(ctrl_vsi);
4046 err_vsi_open:
4047 	ice_vsi_release(ctrl_vsi);
4048 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4049 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4050 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4051 	}
4052 	return err;
4053 }
4054 
4055 /**
4056  * ice_get_opt_fw_name - return optional firmware file name or NULL
4057  * @pf: pointer to the PF instance
4058  */
4059 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4060 {
	/* The optional firmware name is the same as the default, with an
	 * additional dash followed by an EUI-64 identifier (the PCIe Device
	 * Serial Number)
4063 	 */
4064 	struct pci_dev *pdev = pf->pdev;
4065 	char *opt_fw_filename;
4066 	u64 dsn;
4067 
4068 	/* Determine the name of the optional file using the DSN (two
4069 	 * dwords following the start of the DSN Capability).
4070 	 */
4071 	dsn = pci_get_dsn(pdev);
4072 	if (!dsn)
4073 		return NULL;
4074 
4075 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4076 	if (!opt_fw_filename)
4077 		return NULL;
4078 
4079 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4080 		 ICE_DDP_PKG_PATH, dsn);
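	/* e.g. a device with DSN 0x0123456789abcdef (illustrative value)
	 * yields "intel/ice/ddp/ice-0123456789abcdef.pkg"
	 */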
4081 
4082 	return opt_fw_filename;
4083 }
4084 
4085 /**
4086  * ice_request_fw - Device initialization routine
4087  * @pf: pointer to the PF instance
4088  */
4089 static void ice_request_fw(struct ice_pf *pf)
4090 {
4091 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4092 	const struct firmware *firmware = NULL;
4093 	struct device *dev = ice_pf_to_dev(pf);
4094 	int err = 0;
4095 
	/* An optional device-specific DDP package (if present) overrides the
	 * default DDP package file. The kernel logs a debug message if the
	 * file doesn't exist and warning messages for other errors.
4099 	 */
4100 	if (opt_fw_filename) {
4101 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4102 		if (err) {
4103 			kfree(opt_fw_filename);
4104 			goto dflt_pkg_load;
4105 		}
4106 
4107 		/* request for firmware was successful. Download to device */
4108 		ice_load_pkg(firmware, pf);
4109 		kfree(opt_fw_filename);
4110 		release_firmware(firmware);
4111 		return;
4112 	}
4113 
4114 dflt_pkg_load:
4115 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4116 	if (err) {
4117 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4118 		return;
4119 	}
4120 
4121 	/* request for firmware was successful. Download to device */
4122 	ice_load_pkg(firmware, pf);
4123 	release_firmware(firmware);
4124 }
4125 
4126 /**
4127  * ice_print_wake_reason - show the wake up cause in the log
4128  * @pf: pointer to the PF struct
4129  */
4130 static void ice_print_wake_reason(struct ice_pf *pf)
4131 {
4132 	u32 wus = pf->wakeup_reason;
4133 	const char *wake_str;
4134 
4135 	/* if no wake event, nothing to print */
4136 	if (!wus)
4137 		return;
4138 
4139 	if (wus & PFPM_WUS_LNKC_M)
4140 		wake_str = "Link\n";
4141 	else if (wus & PFPM_WUS_MAG_M)
4142 		wake_str = "Magic Packet\n";
4143 	else if (wus & PFPM_WUS_MNG_M)
4144 		wake_str = "Management\n";
4145 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4146 		wake_str = "Firmware Reset\n";
4147 	else
4148 		wake_str = "Unknown\n";
4149 
4150 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4151 }
4152 
4153 /**
4154  * ice_register_netdev - register netdev and devlink port
4155  * @pf: pointer to the PF struct
4156  */
4157 static int ice_register_netdev(struct ice_pf *pf)
4158 {
4159 	struct ice_vsi *vsi;
4160 	int err = 0;
4161 
4162 	vsi = ice_get_main_vsi(pf);
4163 	if (!vsi || !vsi->netdev)
4164 		return -EIO;
4165 
4166 	err = register_netdev(vsi->netdev);
4167 	if (err)
4168 		goto err_register_netdev;
4169 
4170 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4171 	netif_carrier_off(vsi->netdev);
4172 	netif_tx_stop_all_queues(vsi->netdev);
4173 	err = ice_devlink_create_port(vsi);
4174 	if (err)
4175 		goto err_devlink_create;
4176 
4177 	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
4178 
4179 	return 0;
4180 err_devlink_create:
4181 	unregister_netdev(vsi->netdev);
4182 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4183 err_register_netdev:
4184 	free_netdev(vsi->netdev);
4185 	vsi->netdev = NULL;
4186 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4187 	return err;
4188 }
4189 
4190 /**
4191  * ice_probe - Device initialization routine
4192  * @pdev: PCI device information struct
4193  * @ent: entry in ice_pci_tbl
4194  *
4195  * Returns 0 on success, negative on failure
4196  */
4197 static int
4198 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4199 {
4200 	struct device *dev = &pdev->dev;
4201 	struct ice_pf *pf;
4202 	struct ice_hw *hw;
4203 	int i, err;
4204 
4205 	if (pdev->is_virtfn) {
4206 		dev_err(dev, "can't probe a virtual function\n");
4207 		return -EINVAL;
4208 	}
4209 
4210 	/* this driver uses devres, see
4211 	 * Documentation/driver-api/driver-model/devres.rst
4212 	 */
4213 	err = pcim_enable_device(pdev);
4214 	if (err)
4215 		return err;
4216 
4217 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4218 	if (err) {
4219 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4220 		return err;
4221 	}
4222 
4223 	pf = ice_allocate_pf(dev);
4224 	if (!pf)
4225 		return -ENOMEM;
4226 
4227 	/* set up for high or low DMA */
4228 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4229 	if (err)
4230 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4231 	if (err) {
4232 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4233 		return err;
4234 	}
4235 
4236 	pci_enable_pcie_error_reporting(pdev);
4237 	pci_set_master(pdev);
4238 
4239 	pf->pdev = pdev;
4240 	pci_set_drvdata(pdev, pf);
4241 	set_bit(ICE_DOWN, pf->state);
4242 	/* Disable service task until DOWN bit is cleared */
4243 	set_bit(ICE_SERVICE_DIS, pf->state);
4244 
4245 	hw = &pf->hw;
4246 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4247 	pci_save_state(pdev);
4248 
4249 	hw->back = pf;
4250 	hw->vendor_id = pdev->vendor;
4251 	hw->device_id = pdev->device;
4252 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4253 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4254 	hw->subsystem_device_id = pdev->subsystem_device;
4255 	hw->bus.device = PCI_SLOT(pdev->devfn);
4256 	hw->bus.func = PCI_FUNC(pdev->devfn);
4257 	ice_set_ctrlq_len(hw);
4258 
4259 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4260 
4261 	err = ice_devlink_register(pf);
4262 	if (err) {
4263 		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4264 		goto err_exit_unroll;
4265 	}
4266 
4267 #ifndef CONFIG_DYNAMIC_DEBUG
4268 	if (debug < -1)
4269 		hw->debug_mask = debug;
4270 #endif
4271 
4272 	err = ice_init_hw(hw);
4273 	if (err) {
4274 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4275 		err = -EIO;
4276 		goto err_exit_unroll;
4277 	}
4278 
4279 	ice_request_fw(pf);
4280 
	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
	 * set in pf->flags, which will cause ice_is_safe_mode to return
	 * true
4284 	 */
4285 	if (ice_is_safe_mode(pf)) {
4286 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4287 		/* we already got function/device capabilities but these don't
4288 		 * reflect what the driver needs to do in safe mode. Instead of
4289 		 * adding conditional logic everywhere to ignore these
4290 		 * device/function capabilities, override them.
4291 		 */
4292 		ice_set_safe_mode_caps(hw);
4293 	}
4294 
4295 	err = ice_init_pf(pf);
4296 	if (err) {
4297 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4298 		goto err_init_pf_unroll;
4299 	}
4300 
4301 	ice_devlink_init_regions(pf);
4302 
4303 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4304 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4305 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4306 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4307 	i = 0;
4308 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4309 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4310 			pf->hw.tnl.valid_count[TNL_VXLAN];
4311 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4312 			UDP_TUNNEL_TYPE_VXLAN;
4313 		i++;
4314 	}
4315 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4316 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4317 			pf->hw.tnl.valid_count[TNL_GENEVE];
4318 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4319 			UDP_TUNNEL_TYPE_GENEVE;
4320 		i++;
4321 	}
4322 
4323 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4324 	if (!pf->num_alloc_vsi) {
4325 		err = -EIO;
4326 		goto err_init_pf_unroll;
4327 	}
4328 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4329 		dev_warn(&pf->pdev->dev,
4330 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4331 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4332 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4333 	}
4334 
4335 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4336 			       GFP_KERNEL);
4337 	if (!pf->vsi) {
4338 		err = -ENOMEM;
4339 		goto err_init_pf_unroll;
4340 	}
4341 
4342 	err = ice_init_interrupt_scheme(pf);
4343 	if (err) {
4344 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4345 		err = -EIO;
4346 		goto err_init_vsi_unroll;
4347 	}
4348 
4349 	/* In case of MSIX we are going to setup the misc vector right here
4350 	 * to handle admin queue events etc. In case of legacy and MSI
4351 	 * the misc functionality and queue processing is combined in
4352 	 * the same vector and that gets setup at open.
4353 	 */
4354 	err = ice_req_irq_msix_misc(pf);
4355 	if (err) {
4356 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4357 		goto err_init_interrupt_unroll;
4358 	}
4359 
4360 	/* create switch struct for the switch element created by FW on boot */
4361 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4362 	if (!pf->first_sw) {
4363 		err = -ENOMEM;
4364 		goto err_msix_misc_unroll;
4365 	}
4366 
4367 	if (hw->evb_veb)
4368 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4369 	else
4370 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4371 
4372 	pf->first_sw->pf = pf;
4373 
4374 	/* record the sw_id available for later use */
4375 	pf->first_sw->sw_id = hw->port_info->sw_id;
4376 
4377 	err = ice_setup_pf_sw(pf);
4378 	if (err) {
4379 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4380 		goto err_alloc_sw_unroll;
4381 	}
4382 
4383 	clear_bit(ICE_SERVICE_DIS, pf->state);
4384 
4385 	/* tell the firmware we are up */
4386 	err = ice_send_version(pf);
4387 	if (err) {
4388 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4389 			UTS_RELEASE, err);
4390 		goto err_send_version_unroll;
4391 	}
4392 
4393 	/* since everything is good, start the service timer */
4394 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4395 
4396 	err = ice_init_link_events(pf->hw.port_info);
4397 	if (err) {
4398 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4399 		goto err_send_version_unroll;
4400 	}
4401 
4402 	/* not a fatal error if this fails */
4403 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4404 	if (err)
4405 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4406 
4407 	/* not a fatal error if this fails */
4408 	err = ice_update_link_info(pf->hw.port_info);
4409 	if (err)
4410 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4411 
4412 	ice_init_link_dflt_override(pf->hw.port_info);
4413 
4414 	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
4415 
4416 	/* if media available, initialize PHY settings */
4417 	if (pf->hw.port_info->phy.link_info.link_info &
4418 	    ICE_AQ_MEDIA_AVAILABLE) {
4419 		/* not a fatal error if this fails */
4420 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4421 		if (err)
4422 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4423 
4424 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4425 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4426 
4427 			if (vsi)
4428 				ice_configure_phy(vsi);
4429 		}
4430 	} else {
4431 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4432 	}
4433 
4434 	ice_verify_cacheline_size(pf);
4435 
4436 	/* Save wakeup reason register for later use */
4437 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4438 
4439 	/* check for a power management event */
4440 	ice_print_wake_reason(pf);
4441 
4442 	/* clear wake status, all bits */
4443 	wr32(hw, PFPM_WUS, U32_MAX);
4444 
4445 	/* Disable WoL at init, wait for user to enable */
4446 	device_set_wakeup_enable(dev, false);
4447 
4448 	if (ice_is_safe_mode(pf)) {
4449 		ice_set_safe_mode_vlan_cfg(pf);
4450 		goto probe_done;
4451 	}
4452 
4453 	/* initialize DDP driven features */
4454 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4455 		ice_ptp_init(pf);
4456 
4457 	/* Note: Flow director init failure is non-fatal to load */
4458 	if (ice_init_fdir(pf))
4459 		dev_err(dev, "could not initialize flow director\n");
4460 
4461 	/* Note: DCB init failure is non-fatal to load */
4462 	if (ice_init_pf_dcb(pf, false)) {
4463 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4464 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4465 	} else {
4466 		ice_cfg_lldp_mib_change(&pf->hw, true);
4467 	}
4468 
4469 	if (ice_init_lag(pf))
4470 		dev_warn(dev, "Failed to init link aggregation support\n");
4471 
4472 	/* print PCI link speed and width */
4473 	pcie_print_link_status(pf->pdev);
4474 
4475 probe_done:
4476 	err = ice_register_netdev(pf);
4477 	if (err)
4478 		goto err_netdev_reg;
4479 
4480 	/* ready to go, so clear down state bit */
4481 	clear_bit(ICE_DOWN, pf->state);
4482 	if (ice_is_aux_ena(pf)) {
4483 		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4484 		if (pf->aux_idx < 0) {
4485 			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4486 			err = -ENOMEM;
4487 			goto err_netdev_reg;
4488 		}
4489 
4490 		err = ice_init_rdma(pf);
4491 		if (err) {
4492 			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4493 			err = -EIO;
4494 			goto err_init_aux_unroll;
4495 		}
4496 	} else {
4497 		dev_warn(dev, "RDMA is not supported on this device\n");
4498 	}
4499 
4500 	return 0;
4501 
4502 err_init_aux_unroll:
4503 	pf->adev = NULL;
4504 	ida_free(&ice_aux_ida, pf->aux_idx);
4505 err_netdev_reg:
4506 err_send_version_unroll:
4507 	ice_vsi_release_all(pf);
4508 err_alloc_sw_unroll:
4509 	set_bit(ICE_SERVICE_DIS, pf->state);
4510 	set_bit(ICE_DOWN, pf->state);
4511 	devm_kfree(dev, pf->first_sw);
4512 err_msix_misc_unroll:
4513 	ice_free_irq_msix_misc(pf);
4514 err_init_interrupt_unroll:
4515 	ice_clear_interrupt_scheme(pf);
4516 err_init_vsi_unroll:
4517 	devm_kfree(dev, pf->vsi);
4518 err_init_pf_unroll:
4519 	ice_deinit_pf(pf);
4520 	ice_devlink_destroy_regions(pf);
4521 	ice_deinit_hw(hw);
4522 err_exit_unroll:
4523 	ice_devlink_unregister(pf);
4524 	pci_disable_pcie_error_reporting(pdev);
4525 	pci_disable_device(pdev);
4526 	return err;
4527 }
4528 
4529 /**
4530  * ice_set_wake - enable or disable Wake on LAN
4531  * @pf: pointer to the PF struct
4532  *
4533  * Simple helper for WoL control
4534  */
4535 static void ice_set_wake(struct ice_pf *pf)
4536 {
4537 	struct ice_hw *hw = &pf->hw;
4538 	bool wol = pf->wol_ena;
4539 
4540 	/* clear wake state, otherwise new wake events won't fire */
4541 	wr32(hw, PFPM_WUS, U32_MAX);
4542 
4543 	/* enable / disable APM wake up, no RMW needed */
4544 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4545 
4546 	/* set magic packet filter enabled */
4547 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4548 }
4549 
4550 /**
4551  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4552  * @pf: pointer to the PF struct
4553  *
4554  * Issue firmware command to enable multicast magic wake, making
4555  * sure that any locally administered address (LAA) is used for
4556  * wake, and that PF reset doesn't undo the LAA.
4557  */
4558 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4559 {
4560 	struct device *dev = ice_pf_to_dev(pf);
4561 	struct ice_hw *hw = &pf->hw;
4562 	enum ice_status status;
4563 	u8 mac_addr[ETH_ALEN];
4564 	struct ice_vsi *vsi;
4565 	u8 flags;
4566 
4567 	if (!pf->wol_ena)
4568 		return;
4569 
4570 	vsi = ice_get_main_vsi(pf);
4571 	if (!vsi)
4572 		return;
4573 
4574 	/* Get current MAC address in case it's an LAA */
4575 	if (vsi->netdev)
4576 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4577 	else
4578 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4579 
4580 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4581 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4582 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4583 
4584 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4585 	if (status)
4586 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4587 			ice_stat_str(status),
4588 			ice_aq_str(hw->adminq.sq_last_status));
4589 }
4590 
4591 /**
4592  * ice_remove - Device removal routine
4593  * @pdev: PCI device information struct
4594  */
4595 static void ice_remove(struct pci_dev *pdev)
4596 {
4597 	struct ice_pf *pf = pci_get_drvdata(pdev);
4598 	int i;
4599 
4600 	if (!pf)
4601 		return;
4602 
4603 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4604 		if (!ice_is_reset_in_progress(pf->state))
4605 			break;
4606 		msleep(100);
4607 	}
4608 
4609 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4610 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4611 		ice_free_vfs(pf);
4612 	}
4613 
4614 	ice_service_task_stop(pf);
4615 
4616 	ice_aq_cancel_waiting_tasks(pf);
4617 	ice_unplug_aux_dev(pf);
4618 	ida_free(&ice_aux_ida, pf->aux_idx);
4619 	set_bit(ICE_DOWN, pf->state);
4620 
	mutex_destroy(&pf->hw.fdir_fltr_lock);
4622 	ice_deinit_lag(pf);
4623 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4624 		ice_ptp_release(pf);
4625 	if (!ice_is_safe_mode(pf))
4626 		ice_remove_arfs(pf);
4627 	ice_setup_mc_magic_wake(pf);
4628 	ice_vsi_release_all(pf);
4629 	ice_set_wake(pf);
4630 	ice_free_irq_msix_misc(pf);
4631 	ice_for_each_vsi(pf, i) {
4632 		if (!pf->vsi[i])
4633 			continue;
4634 		ice_vsi_free_q_vectors(pf->vsi[i]);
4635 	}
4636 	ice_deinit_pf(pf);
4637 	ice_devlink_destroy_regions(pf);
4638 	ice_deinit_hw(&pf->hw);
4639 	ice_devlink_unregister(pf);
4640 
4641 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4642 	 * do it via ice_schedule_reset() since there is no need to rebuild
4643 	 * and the service task is already stopped.
4644 	 */
4645 	ice_reset(&pf->hw, ICE_RESET_PFR);
4646 	pci_wait_for_pending_transaction(pdev);
4647 	ice_clear_interrupt_scheme(pf);
4648 	pci_disable_pcie_error_reporting(pdev);
4649 	pci_disable_device(pdev);
4650 }
4651 
4652 /**
4653  * ice_shutdown - PCI callback for shutting down device
4654  * @pdev: PCI device information struct
4655  */
4656 static void ice_shutdown(struct pci_dev *pdev)
4657 {
4658 	struct ice_pf *pf = pci_get_drvdata(pdev);
4659 
4660 	ice_remove(pdev);
4661 
4662 	if (system_state == SYSTEM_POWER_OFF) {
4663 		pci_wake_from_d3(pdev, pf->wol_ena);
4664 		pci_set_power_state(pdev, PCI_D3hot);
4665 	}
4666 }
4667 
4668 #ifdef CONFIG_PM
4669 /**
4670  * ice_prepare_for_shutdown - prep for PCI shutdown
4671  * @pf: board private structure
4672  *
4673  * Inform or close all dependent features in prep for PCI device shutdown
4674  */
4675 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4676 {
4677 	struct ice_hw *hw = &pf->hw;
4678 	u32 v;
4679 
4680 	/* Notify VFs of impending reset */
4681 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4682 		ice_vc_notify_reset(pf);
4683 
4684 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4685 
4686 	/* disable the VSIs and their queues that are not already DOWN */
4687 	ice_pf_dis_all_vsi(pf, false);
4688 
4689 	ice_for_each_vsi(pf, v)
4690 		if (pf->vsi[v])
4691 			pf->vsi[v]->vsi_num = 0;
4692 
4693 	ice_shutdown_all_ctrlq(hw);
4694 }
4695 
4696 /**
4697  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4698  * @pf: board private structure to reinitialize
4699  *
 * This routine reinitializes the interrupt scheme that was cleared during
 * the power management suspend callback.
 *
 * It should be called from the resume routine to re-allocate the q_vectors
 * and reacquire interrupts.
4705  */
4706 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4707 {
4708 	struct device *dev = ice_pf_to_dev(pf);
4709 	int ret, v;
4710 
	/* The MSI-X vectors were released and the interrupt scheme was
	 * cleared during suspend; set it back up before the device can be
	 * used again.
	 */
4715 	ret = ice_init_interrupt_scheme(pf);
4716 	if (ret) {
4717 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4718 		return ret;
4719 	}
4720 
4721 	/* Remap vectors and rings, after successful re-init interrupts */
4722 	ice_for_each_vsi(pf, v) {
4723 		if (!pf->vsi[v])
4724 			continue;
4725 
4726 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4727 		if (ret)
4728 			goto err_reinit;
4729 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4730 	}
4731 
4732 	ret = ice_req_irq_msix_misc(pf);
4733 	if (ret) {
4734 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4735 			ret);
4736 		goto err_reinit;
4737 	}
4738 
4739 	return 0;
4740 
4741 err_reinit:
4742 	while (v--)
4743 		if (pf->vsi[v])
4744 			ice_vsi_free_q_vectors(pf->vsi[v]);
4745 
4746 	return ret;
4747 }
4748 
4749 /**
4750  * ice_suspend
4751  * @dev: generic device information structure
4752  *
4753  * Power Management callback to quiesce the device and prepare
4754  * for D3 transition.
4755  */
4756 static int __maybe_unused ice_suspend(struct device *dev)
4757 {
4758 	struct pci_dev *pdev = to_pci_dev(dev);
4759 	struct ice_pf *pf;
4760 	int disabled, v;
4761 
4762 	pf = pci_get_drvdata(pdev);
4763 
4764 	if (!ice_pf_state_is_nominal(pf)) {
4765 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4766 		return -EBUSY;
4767 	}
4768 
4769 	/* Stop watchdog tasks until resume completion.
4770 	 * Even though it is most likely that the service task is
4771 	 * disabled if the device is suspended or down, the service task's
4772 	 * state is controlled by a different state bit, and we should
4773 	 * store and honor whatever state that bit is in at this point.
4774 	 */
4775 	disabled = ice_service_task_stop(pf);
4776 
4777 	ice_unplug_aux_dev(pf);
4778 
	/* Already suspended? Then there is nothing to do */
4780 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4781 		if (!disabled)
4782 			ice_service_task_restart(pf);
4783 		return 0;
4784 	}
4785 
4786 	if (test_bit(ICE_DOWN, pf->state) ||
4787 	    ice_is_reset_in_progress(pf->state)) {
4788 		dev_err(dev, "can't suspend device in reset or already down\n");
4789 		if (!disabled)
4790 			ice_service_task_restart(pf);
4791 		return 0;
4792 	}
4793 
4794 	ice_setup_mc_magic_wake(pf);
4795 
4796 	ice_prepare_for_shutdown(pf);
4797 
4798 	ice_set_wake(pf);
4799 
4800 	/* Free vectors, clear the interrupt scheme and release IRQs
4801 	 * for proper hibernation, especially with large number of CPUs.
4802 	 * Otherwise hibernation might fail when mapping all the vectors back
4803 	 * to CPU0.
4804 	 */
4805 	ice_free_irq_msix_misc(pf);
4806 	ice_for_each_vsi(pf, v) {
4807 		if (!pf->vsi[v])
4808 			continue;
4809 		ice_vsi_free_q_vectors(pf->vsi[v]);
4810 	}
4811 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4812 	ice_clear_interrupt_scheme(pf);
4813 
4814 	pci_save_state(pdev);
4815 	pci_wake_from_d3(pdev, pf->wol_ena);
4816 	pci_set_power_state(pdev, PCI_D3hot);
4817 	return 0;
4818 }
4819 
4820 /**
4821  * ice_resume - PM callback for waking up from D3
4822  * @dev: generic device information structure
4823  */
4824 static int __maybe_unused ice_resume(struct device *dev)
4825 {
4826 	struct pci_dev *pdev = to_pci_dev(dev);
4827 	enum ice_reset_req reset_type;
4828 	struct ice_pf *pf;
4829 	struct ice_hw *hw;
4830 	int ret;
4831 
4832 	pci_set_power_state(pdev, PCI_D0);
4833 	pci_restore_state(pdev);
4834 	pci_save_state(pdev);
4835 
4836 	if (!pci_device_is_present(pdev))
4837 		return -ENODEV;
4838 
4839 	ret = pci_enable_device_mem(pdev);
4840 	if (ret) {
4841 		dev_err(dev, "Cannot enable device after suspend\n");
4842 		return ret;
4843 	}
4844 
4845 	pf = pci_get_drvdata(pdev);
4846 	hw = &pf->hw;
4847 
4848 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4849 	ice_print_wake_reason(pf);
4850 
4851 	/* We cleared the interrupt scheme when we suspended, so we need to
4852 	 * restore it now to resume device functionality.
4853 	 */
4854 	ret = ice_reinit_interrupt_scheme(pf);
4855 	if (ret)
4856 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4857 
4858 	clear_bit(ICE_DOWN, pf->state);
4859 	/* Now perform PF reset and rebuild */
4860 	reset_type = ICE_RESET_PFR;
4861 	/* re-enable service task for reset, but allow reset to schedule it */
4862 	clear_bit(ICE_SERVICE_DIS, pf->state);
4863 
4864 	if (ice_schedule_reset(pf, reset_type))
4865 		dev_err(dev, "Reset during resume failed.\n");
4866 
4867 	clear_bit(ICE_SUSPENDED, pf->state);
4868 	ice_service_task_restart(pf);
4869 
4870 	/* Restart the service task */
4871 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4872 
4873 	return 0;
4874 }
4875 #endif /* CONFIG_PM */
4876 
4877 /**
4878  * ice_pci_err_detected - warning that PCI error has been detected
4879  * @pdev: PCI device information struct
4880  * @err: the type of PCI error
4881  *
4882  * Called to warn that something happened on the PCI bus and the error handling
4883  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4884  */
4885 static pci_ers_result_t
4886 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4887 {
4888 	struct ice_pf *pf = pci_get_drvdata(pdev);
4889 
4890 	if (!pf) {
4891 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4892 			__func__, err);
4893 		return PCI_ERS_RESULT_DISCONNECT;
4894 	}
4895 
4896 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4897 		ice_service_task_stop(pf);
4898 
4899 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4900 			set_bit(ICE_PFR_REQ, pf->state);
4901 			ice_prepare_for_reset(pf);
4902 		}
4903 	}
4904 
4905 	return PCI_ERS_RESULT_NEED_RESET;
4906 }
4907 
4908 /**
4909  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4910  * @pdev: PCI device information struct
4911  *
4912  * Called to determine if the driver can recover from the PCI slot reset by
4913  * using a register read to determine if the device is recoverable.
4914  */
4915 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4916 {
4917 	struct ice_pf *pf = pci_get_drvdata(pdev);
4918 	pci_ers_result_t result;
4919 	int err;
4920 	u32 reg;
4921 
4922 	err = pci_enable_device_mem(pdev);
4923 	if (err) {
4924 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4925 			err);
4926 		result = PCI_ERS_RESULT_DISCONNECT;
4927 	} else {
4928 		pci_set_master(pdev);
4929 		pci_restore_state(pdev);
4930 		pci_save_state(pdev);
4931 		pci_wake_from_d3(pdev, false);
4932 
4933 		/* Check for life */
4934 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4935 		if (!reg)
4936 			result = PCI_ERS_RESULT_RECOVERED;
4937 		else
4938 			result = PCI_ERS_RESULT_DISCONNECT;
4939 	}
4940 
4941 	err = pci_aer_clear_nonfatal_status(pdev);
4942 	if (err)
		/* non-fatal, continue */
		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
			err);
4946 
4947 	return result;
4948 }
4949 
4950 /**
4951  * ice_pci_err_resume - restart operations after PCI error recovery
4952  * @pdev: PCI device information struct
4953  *
4954  * Called to allow the driver to bring things back up after PCI error and/or
4955  * reset recovery have finished
4956  */
4957 static void ice_pci_err_resume(struct pci_dev *pdev)
4958 {
4959 	struct ice_pf *pf = pci_get_drvdata(pdev);
4960 
4961 	if (!pf) {
4962 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4963 			__func__);
4964 		return;
4965 	}
4966 
4967 	if (test_bit(ICE_SUSPENDED, pf->state)) {
4968 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4969 			__func__);
4970 		return;
4971 	}
4972 
4973 	ice_restore_all_vfs_msi_state(pdev);
4974 
4975 	ice_do_reset(pf, ICE_RESET_PFR);
4976 	ice_service_task_restart(pf);
4977 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4978 }
4979 
4980 /**
4981  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4982  * @pdev: PCI device information struct
4983  */
4984 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4985 {
4986 	struct ice_pf *pf = pci_get_drvdata(pdev);
4987 
4988 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4989 		ice_service_task_stop(pf);
4990 
4991 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4992 			set_bit(ICE_PFR_REQ, pf->state);
4993 			ice_prepare_for_reset(pf);
4994 		}
4995 	}
4996 }
4997 
4998 /**
4999  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5000  * @pdev: PCI device information struct
5001  */
5002 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5003 {
5004 	ice_pci_err_resume(pdev);
5005 }
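
/* During AER recovery the PCI core drives the handlers above roughly as
 *
 *	->error_detected()  ->  ->slot_reset()  ->  ->resume()
 *
 * while ->reset_prepare()/->reset_done() bracket resets initiated through
 * the PCI core (e.g. FLR). See Documentation/PCI/pci-error-recovery.rst
 * for the full state machine.
 */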
5006 
5007 /* ice_pci_tbl - PCI Device ID Table
5008  *
5009  * Wildcard entries (PCI_ANY_ID) should come last
5010  * Last entry must be all 0s
5011  *
5012  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5013  *   Class, Class Mask, private data (not used) }
5014  */
5015 static const struct pci_device_id ice_pci_tbl[] = {
5016 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5017 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5018 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5019 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5020 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5021 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5022 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5023 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5024 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5025 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5026 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5027 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5028 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5029 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5030 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5031 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5032 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5033 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5034 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5035 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5036 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5037 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5038 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5039 	/* required last entry */
5040 	{ 0, }
5041 };
5042 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5043 
5044 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
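/* SIMPLE_DEV_PM_OPS() above routes ice_suspend()/ice_resume() to both the
 * system suspend/resume and the hibernation (freeze/thaw/poweroff/restore)
 * callbacks of dev_pm_ops.
 */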
5045 
5046 static const struct pci_error_handlers ice_pci_err_handler = {
5047 	.error_detected = ice_pci_err_detected,
5048 	.slot_reset = ice_pci_err_slot_reset,
5049 	.reset_prepare = ice_pci_err_reset_prepare,
5050 	.reset_done = ice_pci_err_reset_done,
5051 	.resume = ice_pci_err_resume
5052 };
5053 
5054 static struct pci_driver ice_driver = {
5055 	.name = KBUILD_MODNAME,
5056 	.id_table = ice_pci_tbl,
5057 	.probe = ice_probe,
5058 	.remove = ice_remove,
5059 #ifdef CONFIG_PM
5060 	.driver.pm = &ice_pm_ops,
5061 #endif /* CONFIG_PM */
5062 	.shutdown = ice_shutdown,
5063 	.sriov_configure = ice_sriov_configure,
5064 	.err_handler = &ice_pci_err_handler
5065 };
5066 
5067 /**
5068  * ice_module_init - Driver registration routine
5069  *
5070  * ice_module_init is the first routine called when the driver is
5071  * loaded. All it does is register with the PCI subsystem.
5072  */
5073 static int __init ice_module_init(void)
5074 {
5075 	int status;
5076 
5077 	pr_info("%s\n", ice_driver_string);
5078 	pr_info("%s\n", ice_copyright);
5079 
5080 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5081 	if (!ice_wq) {
5082 		pr_err("Failed to create workqueue\n");
5083 		return -ENOMEM;
5084 	}
5085 
5086 	status = pci_register_driver(&ice_driver);
5087 	if (status) {
5088 		pr_err("failed to register PCI driver, err %d\n", status);
5089 		destroy_workqueue(ice_wq);
5090 	}
5091 
5092 	return status;
5093 }
5094 module_init(ice_module_init);
5095 
5096 /**
5097  * ice_module_exit - Driver exit cleanup routine
5098  *
5099  * ice_module_exit is called just before the driver is removed
5100  * from memory.
5101  */
5102 static void __exit ice_module_exit(void)
5103 {
5104 	pci_unregister_driver(&ice_driver);
5105 	destroy_workqueue(ice_wq);
5106 	pr_info("module unloaded\n");
5107 }
5108 module_exit(ice_module_exit);
5109 
5110 /**
5111  * ice_set_mac_address - NDO callback to set MAC address
5112  * @netdev: network interface device structure
5113  * @pi: pointer to an address structure
5114  *
5115  * Returns 0 on success, negative on failure
5116  */
5117 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5118 {
5119 	struct ice_netdev_priv *np = netdev_priv(netdev);
5120 	struct ice_vsi *vsi = np->vsi;
5121 	struct ice_pf *pf = vsi->back;
5122 	struct ice_hw *hw = &pf->hw;
5123 	struct sockaddr *addr = pi;
5124 	enum ice_status status;
5125 	u8 old_mac[ETH_ALEN];
5126 	u8 flags = 0;
5127 	int err = 0;
5128 	u8 *mac;
5129 
5130 	mac = (u8 *)addr->sa_data;
5131 
5132 	if (!is_valid_ether_addr(mac))
5133 		return -EADDRNOTAVAIL;
5134 
5135 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5136 		netdev_dbg(netdev, "already using mac %pM\n", mac);
5137 		return 0;
5138 	}
5139 
5140 	if (test_bit(ICE_DOWN, pf->state) ||
5141 	    ice_is_reset_in_progress(pf->state)) {
5142 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5143 			   mac);
5144 		return -EBUSY;
5145 	}
5146 
5147 	netif_addr_lock_bh(netdev);
5148 	ether_addr_copy(old_mac, netdev->dev_addr);
5149 	/* change the netdev's MAC address */
5150 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
5151 	netif_addr_unlock_bh(netdev);
5152 
5153 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5154 	status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5155 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5156 		err = -EADDRNOTAVAIL;
5157 		goto err_update_filters;
5158 	}
5159 
5160 	/* Add filter for new MAC. If filter exists, return success */
5161 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5162 	if (status == ICE_ERR_ALREADY_EXISTS)
5163 		/* Although this MAC filter is already present in hardware it's
5164 		 * possible in some cases (e.g. bonding) that dev_addr was
5165 		 * modified outside of the driver and needs to be restored back
5166 		 * to this value.
5167 		 */
5168 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5169 	else if (status)
5170 		/* error if the new filter addition failed */
5171 		err = -EADDRNOTAVAIL;
5172 
5173 err_update_filters:
5174 	if (err) {
5175 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5176 			   mac);
5177 		netif_addr_lock_bh(netdev);
5178 		ether_addr_copy(netdev->dev_addr, old_mac);
5179 		netif_addr_unlock_bh(netdev);
5180 		return err;
5181 	}
5182 
5183 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5184 		   netdev->dev_addr);
5185 
5186 	/* write new MAC address to the firmware */
5187 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5188 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5189 	if (status) {
5190 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
5191 			   mac, ice_stat_str(status));
5192 	}
5193 	return 0;
5194 }
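
/* This handler is reached through dev_set_mac_address(), e.g. from
 * "ip link set dev <iface> address 00:11:22:33:44:55" (address shown
 * purely for illustration).
 */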
5195 
5196 /**
5197  * ice_set_rx_mode - NDO callback to set the netdev filters
5198  * @netdev: network interface device structure
5199  */
5200 static void ice_set_rx_mode(struct net_device *netdev)
5201 {
5202 	struct ice_netdev_priv *np = netdev_priv(netdev);
5203 	struct ice_vsi *vsi = np->vsi;
5204 
5205 	if (!vsi)
5206 		return;
5207 
5208 	/* Set the flags to synchronize filters
5209 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5210 	 * flags
5211 	 */
5212 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5213 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5214 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5215 
5216 	/* schedule our worker thread which will take care of
5217 	 * applying the new filter changes
5218 	 */
5219 	ice_service_task_schedule(vsi->back);
5220 }
5221 
5222 /**
5223  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5224  * @netdev: network interface device structure
5225  * @queue_index: Queue ID
5226  * @maxrate: maximum bandwidth in Mbps
5227  */
5228 static int
5229 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5230 {
5231 	struct ice_netdev_priv *np = netdev_priv(netdev);
5232 	struct ice_vsi *vsi = np->vsi;
5233 	enum ice_status status;
5234 	u16 q_handle;
5235 	u8 tc;
5236 
5237 	/* Validate maxrate requested is within permitted range */
5238 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5239 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5240 			   maxrate, queue_index);
5241 		return -EINVAL;
5242 	}
5243 
5244 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5245 	tc = ice_dcb_get_tc(vsi, queue_index);
5246 
5247 	/* Set BW back to default, when user set maxrate to 0 */
5248 	if (!maxrate)
5249 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5250 					       q_handle, ICE_MAX_BW);
5251 	else
5252 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5253 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5254 	if (status) {
5255 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5256 			   ice_stat_str(status));
5257 		return -EIO;
5258 	}
5259 
5260 	return 0;
5261 }
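
/* This NDO backs the per-queue tx_maxrate sysfs attribute, so a cap can be
 * applied from user space with, e.g.:
 *
 *	echo 1000 > /sys/class/net/<iface>/queues/tx-0/tx_maxrate
 *
 * which limits Tx queue 0 to 1000 Mbps (values illustrative).
 */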
5262 
5263 /**
5264  * ice_fdb_add - add an entry to the hardware database
5265  * @ndm: the input from the stack
5266  * @tb: pointer to array of nladdr (unused)
5267  * @dev: the net device pointer
5268  * @addr: the MAC address entry being added
5269  * @vid: VLAN ID
5270  * @flags: instructions from stack about fdb operation
5271  * @extack: netlink extended ack
5272  */
5273 static int
5274 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5275 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5276 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5277 {
5278 	int err;
5279 
5280 	if (vid) {
5281 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5282 		return -EINVAL;
5283 	}
5284 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5285 		netdev_err(dev, "FDB only supports static addresses\n");
5286 		return -EINVAL;
5287 	}
5288 
5289 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5290 		err = dev_uc_add_excl(dev, addr);
5291 	else if (is_multicast_ether_addr(addr))
5292 		err = dev_mc_add_excl(dev, addr);
5293 	else
5294 		err = -EINVAL;
5295 
5296 	/* Only return duplicate errors if NLM_F_EXCL is set */
5297 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5298 		err = 0;
5299 
5300 	return err;
5301 }
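
/* ice_fdb_add is wired up as the ndo_fdb_add callback and is normally reached
 * from iproute2's bridge tool, e.g. (address and interface purely
 * illustrative):
 *
 *   bridge fdb add 33:33:00:00:00:42 dev eth0 self permanent
 *
 * which arrives here via rtnetlink and adds the address to the netdev's
 * multicast (or unicast) address list.
 */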
5302 
5303 /**
5304  * ice_fdb_del - delete an entry from the hardware database
5305  * @ndm: the input from the stack
5306  * @tb: pointer to array of nladdr (unused)
5307  * @dev: the net device pointer
 * @addr: the MAC address entry being removed
5309  * @vid: VLAN ID
5310  */
5311 static int
5312 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5313 	    struct net_device *dev, const unsigned char *addr,
5314 	    __always_unused u16 vid)
5315 {
5316 	int err;
5317 
5318 	if (ndm->ndm_state & NUD_PERMANENT) {
5319 		netdev_err(dev, "FDB only supports static addresses\n");
5320 		return -EINVAL;
5321 	}
5322 
5323 	if (is_unicast_ether_addr(addr))
5324 		err = dev_uc_del(dev, addr);
5325 	else if (is_multicast_ether_addr(addr))
5326 		err = dev_mc_del(dev, addr);
5327 	else
5328 		err = -EINVAL;
5329 
5330 	return err;
5331 }
5332 
5333 /**
5334  * ice_set_features - set the netdev feature flags
5335  * @netdev: ptr to the netdev being adjusted
5336  * @features: the feature set that the stack is suggesting
5337  */
5338 static int
5339 ice_set_features(struct net_device *netdev, netdev_features_t features)
5340 {
5341 	struct ice_netdev_priv *np = netdev_priv(netdev);
5342 	struct ice_vsi *vsi = np->vsi;
5343 	struct ice_pf *pf = vsi->back;
5344 	int ret = 0;
5345 
5346 	/* Don't set any netdev advanced features with device in Safe Mode */
5347 	if (ice_is_safe_mode(vsi->back)) {
5348 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5349 		return ret;
5350 	}
5351 
5352 	/* Do not change setting during reset */
5353 	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features is temporarily unavailable.\n");
5355 		return -EBUSY;
5356 	}
5357 
5358 	/* Multiple features can be changed in one call so keep features in
5359 	 * separate if/else statements to guarantee each feature is checked
5360 	 */
5361 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5362 		ice_vsi_manage_rss_lut(vsi, true);
5363 	else if (!(features & NETIF_F_RXHASH) &&
5364 		 netdev->features & NETIF_F_RXHASH)
5365 		ice_vsi_manage_rss_lut(vsi, false);
5366 
5367 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5368 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5369 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5370 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5371 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5372 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5373 
5374 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5375 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5376 		ret = ice_vsi_manage_vlan_insertion(vsi);
5377 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5378 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5379 		ret = ice_vsi_manage_vlan_insertion(vsi);
5380 
5381 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5382 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5383 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5384 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5385 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5386 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5387 
5388 	if ((features & NETIF_F_NTUPLE) &&
5389 	    !(netdev->features & NETIF_F_NTUPLE)) {
5390 		ice_vsi_manage_fdir(vsi, true);
5391 		ice_init_arfs(vsi);
5392 	} else if (!(features & NETIF_F_NTUPLE) &&
5393 		 (netdev->features & NETIF_F_NTUPLE)) {
5394 		ice_vsi_manage_fdir(vsi, false);
5395 		ice_clear_arfs(vsi);
5396 	}
5397 
5398 	return ret;
5399 }
5400 
5401 /**
5402  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5403  * @vsi: VSI to setup VLAN properties for
5404  */
5405 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5406 {
5407 	int ret = 0;
5408 
5409 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5410 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5411 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5412 		ret = ice_vsi_manage_vlan_insertion(vsi);
5413 
5414 	return ret;
5415 }
5416 
5417 /**
5418  * ice_vsi_cfg - Setup the VSI
5419  * @vsi: the VSI being configured
5420  *
5421  * Return 0 on success and negative value on error
5422  */
5423 int ice_vsi_cfg(struct ice_vsi *vsi)
5424 {
5425 	int err;
5426 
5427 	if (vsi->netdev) {
5428 		ice_set_rx_mode(vsi->netdev);
5429 
5430 		err = ice_vsi_vlan_setup(vsi);
5431 
5432 		if (err)
5433 			return err;
5434 	}
5435 	ice_vsi_cfg_dcb_rings(vsi);
5436 
5437 	err = ice_vsi_cfg_lan_txqs(vsi);
5438 	if (!err && ice_is_xdp_ena_vsi(vsi))
5439 		err = ice_vsi_cfg_xdp_txqs(vsi);
5440 	if (!err)
5441 		err = ice_vsi_cfg_rxqs(vsi);
5442 
5443 	return err;
5444 }
5445 
/* THEORY OF MODERATION:
 * The code below creates custom DIM profiles for use by this driver, because
 * the ice hardware works differently from the hardware that DIMLIB was
 * originally made for. ice hardware doesn't have packet count limits that
 * can trigger an interrupt, but it *does* have interrupt rate limit support,
 * and this code adds that capability to be used by the driver when it's
 * using DIMLIB. The DIMLIB code was always designed to be a suggestion to
 * the driver for how to "respond" to traffic and interrupts, so this driver
 * uses a slightly different set of moderation parameters to get best
 * performance.
 */
5456 struct ice_dim {
	/* the throttle rate for interrupts, basically the worst-case delay
	 * before an initial interrupt fires; value is stored in microseconds.
	 */
5460 	u16 itr;
	/* the rate limit for interrupts, which can cap a delay from a small
	 * ITR at a certain number of interrupts per second. e.g. a 2us ITR
	 * could yield as many as 500,000 interrupts per second, but with a
	 * 10us rate limit, that is capped at 100,000 interrupts per second.
	 * Value is stored in microseconds.
	 */
5467 	u16 intrl;
5468 };
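
/* A worked example of how the two values combine (numbers illustrative):
 * an ITR of 2us alone allows up to 1,000,000 / 2 = 500,000 interrupts per
 * second; pairing it with an INTRL of 10us caps the sustained rate at
 * 1,000,000 / 10 = 100,000 interrupts per second. In general:
 *
 *   max_irqs_per_sec = USEC_PER_SEC / max(itr, intrl)
 */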
5469 
/* Make a different profile for Rx that doesn't allow quite so aggressive
 * moderation at the high end (it maxes out at 128us, or about 8k interrupts
 * per second). The INTRL/rate parameters here are only useful to cap small
 * ITR values, which is why larger ITRs - like 128, which can only generate
 * 8k interrupts per second - have no rate limit and the values are set to
 * zero. The rate limit values do affect latency, and so must be reasonably
 * small so as not to impact latency-sensitive tests.
 */
5478 static const struct ice_dim rx_profile[] = {
5479 	{2, 10},
5480 	{8, 16},
5481 	{32, 0},
5482 	{96, 0},
5483 	{128, 0}
5484 };
5485 
5486 /* The transmit profile, which has the same sorts of values
5487  * as the previous struct
5488  */
5489 static const struct ice_dim tx_profile[] = {
5490 	{2, 10},
5491 	{8, 16},
5492 	{64, 0},
5493 	{128, 0},
5494 	{256, 0}
5495 };
5496 
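/**
 * ice_tx_dim_work - apply the DIM-selected Tx moderation profile
 * @work: the work_struct embedded in the ring container's struct dim
 *
 * Deferred callback invoked by DIMLIB when it picks a new Tx profile index;
 * looks up the corresponding ITR/INTRL pair in tx_profile, programs it into
 * hardware, and re-arms DIM for the next measurement round.
 */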
5497 static void ice_tx_dim_work(struct work_struct *work)
5498 {
5499 	struct ice_ring_container *rc;
5500 	struct ice_q_vector *q_vector;
5501 	struct dim *dim;
5502 	u16 itr, intrl;
5503 
5504 	dim = container_of(work, struct dim, work);
5505 	rc = container_of(dim, struct ice_ring_container, dim);
5506 	q_vector = container_of(rc, struct ice_q_vector, tx);
5507 
5508 	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5509 		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5510 
5511 	/* look up the values in our local table */
5512 	itr = tx_profile[dim->profile_ix].itr;
5513 	intrl = tx_profile[dim->profile_ix].intrl;
5514 
5515 	ice_trace(tx_dim_work, q_vector, dim);
5516 	ice_write_itr(rc, itr);
5517 	ice_write_intrl(q_vector, intrl);
5518 
5519 	dim->state = DIM_START_MEASURE;
5520 }
5521 
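/**
 * ice_rx_dim_work - apply the DIM-selected Rx moderation profile
 * @work: the work_struct embedded in the ring container's struct dim
 *
 * Rx counterpart of ice_tx_dim_work(); looks up the ITR/INTRL pair in
 * rx_profile and programs it into hardware before re-arming DIM.
 */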
5522 static void ice_rx_dim_work(struct work_struct *work)
5523 {
5524 	struct ice_ring_container *rc;
5525 	struct ice_q_vector *q_vector;
5526 	struct dim *dim;
5527 	u16 itr, intrl;
5528 
5529 	dim = container_of(work, struct dim, work);
5530 	rc = container_of(dim, struct ice_ring_container, dim);
5531 	q_vector = container_of(rc, struct ice_q_vector, rx);
5532 
5533 	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5534 		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5535 
5536 	/* look up the values in our local table */
5537 	itr = rx_profile[dim->profile_ix].itr;
5538 	intrl = rx_profile[dim->profile_ix].intrl;
5539 
5540 	ice_trace(rx_dim_work, q_vector, dim);
5541 	ice_write_itr(rc, itr);
5542 	ice_write_intrl(q_vector, intrl);
5543 
5544 	dim->state = DIM_START_MEASURE;
5545 }
5546 
5547 /**
5548  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5549  * @vsi: the VSI being configured
5550  */
5551 static void ice_napi_enable_all(struct ice_vsi *vsi)
5552 {
5553 	int q_idx;
5554 
5555 	if (!vsi->netdev)
5556 		return;
5557 
5558 	ice_for_each_q_vector(vsi, q_idx) {
5559 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5560 
5561 		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5562 		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5563 
5564 		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5565 		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5566 
5567 		if (q_vector->rx.ring || q_vector->tx.ring)
5568 			napi_enable(&q_vector->napi);
5569 	}
5570 }
5571 
5572 /**
5573  * ice_up_complete - Finish the last steps of bringing up a connection
5574  * @vsi: The VSI being configured
5575  *
5576  * Return 0 on success and negative value on error
5577  */
5578 static int ice_up_complete(struct ice_vsi *vsi)
5579 {
5580 	struct ice_pf *pf = vsi->back;
5581 	int err;
5582 
5583 	ice_vsi_cfg_msix(vsi);
5584 
5585 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5586 	 * Tx queue group list was configured and the context bits were
5587 	 * programmed using ice_vsi_cfg_txqs
5588 	 */
5589 	err = ice_vsi_start_all_rx_rings(vsi);
5590 	if (err)
5591 		return err;
5592 
5593 	clear_bit(ICE_VSI_DOWN, vsi->state);
5594 	ice_napi_enable_all(vsi);
5595 	ice_vsi_ena_irq(vsi);
5596 
5597 	if (vsi->port_info &&
5598 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5599 	    vsi->netdev) {
5600 		ice_print_link_msg(vsi, true);
5601 		netif_tx_start_all_queues(vsi->netdev);
5602 		netif_carrier_on(vsi->netdev);
5603 	}
5604 
5605 	ice_service_task_schedule(pf);
5606 
5607 	return 0;
5608 }
5609 
5610 /**
5611  * ice_up - Bring the connection back up after being down
5612  * @vsi: VSI being configured
5613  */
5614 int ice_up(struct ice_vsi *vsi)
5615 {
5616 	int err;
5617 
5618 	err = ice_vsi_cfg(vsi);
5619 	if (!err)
5620 		err = ice_up_complete(vsi);
5621 
5622 	return err;
5623 }
5624 
5625 /**
5626  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5627  * @ring: Tx or Rx ring to read stats from
5628  * @pkts: packets stats counter
5629  * @bytes: bytes stats counter
5630  *
 * This function fetches stats from the ring considering the atomic operations
 * that need to be performed to read u64 values on 32-bit machines.
5633  */
5634 static void
5635 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5636 {
5637 	unsigned int start;
5638 	*pkts = 0;
5639 	*bytes = 0;
5640 
5641 	if (!ring)
5642 		return;
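	/* Standard u64_stats read loop: retry the snapshot if a writer
	 * updated the counters mid-read; on 64-bit kernels this reduces
	 * to plain loads.
	 */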
5643 	do {
5644 		start = u64_stats_fetch_begin_irq(&ring->syncp);
5645 		*pkts = ring->stats.pkts;
5646 		*bytes = ring->stats.bytes;
5647 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5648 }
5649 
5650 /**
5651  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5652  * @vsi: the VSI to be updated
5653  * @rings: rings to work on
5654  * @count: number of rings
5655  */
5656 static void
5657 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5658 			     u16 count)
5659 {
5660 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5661 	u16 i;
5662 
5663 	for (i = 0; i < count; i++) {
5664 		struct ice_ring *ring;
5665 		u64 pkts, bytes;
5666 
		ring = READ_ONCE(rings[i]);
		if (!ring)
			continue;
		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5669 		vsi_stats->tx_packets += pkts;
5670 		vsi_stats->tx_bytes += bytes;
5671 		vsi->tx_restart += ring->tx_stats.restart_q;
5672 		vsi->tx_busy += ring->tx_stats.tx_busy;
5673 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5674 	}
5675 }
5676 
5677 /**
5678  * ice_update_vsi_ring_stats - Update VSI stats counters
5679  * @vsi: the VSI to be updated
5680  */
5681 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5682 {
5683 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5684 	u64 pkts, bytes;
5685 	int i;
5686 
5687 	/* reset netdev stats */
5688 	vsi_stats->tx_packets = 0;
5689 	vsi_stats->tx_bytes = 0;
5690 	vsi_stats->rx_packets = 0;
5691 	vsi_stats->rx_bytes = 0;
5692 
5693 	/* reset non-netdev (extended) stats */
5694 	vsi->tx_restart = 0;
5695 	vsi->tx_busy = 0;
5696 	vsi->tx_linearize = 0;
5697 	vsi->rx_buf_failed = 0;
5698 	vsi->rx_page_failed = 0;
5699 
5700 	rcu_read_lock();
5701 
5702 	/* update Tx rings counters */
5703 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5704 
5705 	/* update Rx rings counters */
5706 	ice_for_each_rxq(vsi, i) {
		struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);

		if (!ring)
			continue;
		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5710 		vsi_stats->rx_packets += pkts;
5711 		vsi_stats->rx_bytes += bytes;
5712 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5713 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5714 	}
5715 
5716 	/* update XDP Tx rings counters */
5717 	if (ice_is_xdp_ena_vsi(vsi))
5718 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5719 					     vsi->num_xdp_txq);
5720 
5721 	rcu_read_unlock();
5722 }
5723 
5724 /**
5725  * ice_update_vsi_stats - Update VSI stats counters
5726  * @vsi: the VSI to be updated
5727  */
5728 void ice_update_vsi_stats(struct ice_vsi *vsi)
5729 {
5730 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5731 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5732 	struct ice_pf *pf = vsi->back;
5733 
5734 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5735 	    test_bit(ICE_CFG_BUSY, pf->state))
5736 		return;
5737 
5738 	/* get stats as recorded by Tx/Rx rings */
5739 	ice_update_vsi_ring_stats(vsi);
5740 
5741 	/* get VSI stats as recorded by the hardware */
5742 	ice_update_eth_stats(vsi);
5743 
5744 	cur_ns->tx_errors = cur_es->tx_errors;
5745 	cur_ns->rx_dropped = cur_es->rx_discards;
5746 	cur_ns->tx_dropped = cur_es->tx_discards;
5747 	cur_ns->multicast = cur_es->rx_multicast;
5748 
	/* update some more netdev stats if this is the main VSI */
5750 	if (vsi->type == ICE_VSI_PF) {
5751 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5752 		cur_ns->rx_errors = pf->stats.crc_errors +
5753 				    pf->stats.illegal_bytes +
5754 				    pf->stats.rx_len_errors +
5755 				    pf->stats.rx_undersize +
5756 				    pf->hw_csum_rx_error +
5757 				    pf->stats.rx_jabber +
5758 				    pf->stats.rx_fragments +
5759 				    pf->stats.rx_oversize;
5760 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5761 		/* record drops from the port level */
5762 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5763 	}
5764 }
5765 
5766 /**
5767  * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
5769  */
5770 void ice_update_pf_stats(struct ice_pf *pf)
5771 {
5772 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5773 	struct ice_hw *hw = &pf->hw;
5774 	u16 fd_ctr_base;
5775 	u8 port;
5776 
5777 	port = hw->port_info->lport;
5778 	prev_ps = &pf->stats_prev;
5779 	cur_ps = &pf->stats;
5780 
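	/* Each ice_stat_update40/32 call below diffs the rolling hardware
	 * counter against the snapshot in stats_prev and accumulates the
	 * delta into stats; this handles counter wrap and the fact that
	 * the hardware counters are not cleared on PF reset.
	 */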
5781 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5782 			  &prev_ps->eth.rx_bytes,
5783 			  &cur_ps->eth.rx_bytes);
5784 
5785 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5786 			  &prev_ps->eth.rx_unicast,
5787 			  &cur_ps->eth.rx_unicast);
5788 
5789 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5790 			  &prev_ps->eth.rx_multicast,
5791 			  &cur_ps->eth.rx_multicast);
5792 
5793 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5794 			  &prev_ps->eth.rx_broadcast,
5795 			  &cur_ps->eth.rx_broadcast);
5796 
5797 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5798 			  &prev_ps->eth.rx_discards,
5799 			  &cur_ps->eth.rx_discards);
5800 
5801 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5802 			  &prev_ps->eth.tx_bytes,
5803 			  &cur_ps->eth.tx_bytes);
5804 
5805 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5806 			  &prev_ps->eth.tx_unicast,
5807 			  &cur_ps->eth.tx_unicast);
5808 
5809 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5810 			  &prev_ps->eth.tx_multicast,
5811 			  &cur_ps->eth.tx_multicast);
5812 
5813 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5814 			  &prev_ps->eth.tx_broadcast,
5815 			  &cur_ps->eth.tx_broadcast);
5816 
5817 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5818 			  &prev_ps->tx_dropped_link_down,
5819 			  &cur_ps->tx_dropped_link_down);
5820 
5821 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5822 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5823 
5824 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5825 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5826 
5827 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5828 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5829 
5830 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5831 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5832 
5833 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5834 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5835 
5836 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5837 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5838 
5839 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5840 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5841 
5842 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5843 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5844 
5845 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5846 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5847 
5848 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5849 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5850 
5851 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5852 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5853 
5854 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5855 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5856 
5857 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5858 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5859 
5860 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5861 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5862 
5863 	fd_ctr_base = hw->fd_ctr_base;
5864 
5865 	ice_stat_update40(hw,
5866 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5867 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5868 			  &cur_ps->fd_sb_match);
5869 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5870 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5871 
5872 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5873 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5874 
5875 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5876 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5877 
5878 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5879 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5880 
5881 	ice_update_dcb_stats(pf);
5882 
5883 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5884 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5885 
5886 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5887 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5888 
5889 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5890 			  &prev_ps->mac_local_faults,
5891 			  &cur_ps->mac_local_faults);
5892 
5893 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5894 			  &prev_ps->mac_remote_faults,
5895 			  &cur_ps->mac_remote_faults);
5896 
5897 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5898 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5899 
5900 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5901 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5902 
5903 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5904 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5905 
5906 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5907 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5908 
5909 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5910 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5911 
5912 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5913 
5914 	pf->stat_prev_loaded = true;
5915 }
5916 
5917 /**
5918  * ice_get_stats64 - get statistics for network device structure
5919  * @netdev: network interface device structure
5920  * @stats: main device statistics structure
5921  */
5922 static
5923 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5924 {
5925 	struct ice_netdev_priv *np = netdev_priv(netdev);
5926 	struct rtnl_link_stats64 *vsi_stats;
5927 	struct ice_vsi *vsi = np->vsi;
5928 
5929 	vsi_stats = &vsi->net_stats;
5930 
5931 	if (!vsi->num_txq || !vsi->num_rxq)
5932 		return;
5933 
	/* netdev packet/byte stats come from ring counters. These are obtained
	 * by summing up the ring counters (done by ice_update_vsi_ring_stats).
	 * But only call the update routine and read the registers if the VSI
	 * is not down.
	 */
5939 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
5940 		ice_update_vsi_ring_stats(vsi);
5941 	stats->tx_packets = vsi_stats->tx_packets;
5942 	stats->tx_bytes = vsi_stats->tx_bytes;
5943 	stats->rx_packets = vsi_stats->rx_packets;
5944 	stats->rx_bytes = vsi_stats->rx_bytes;
5945 
5946 	/* The rest of the stats can be read from the hardware but instead we
5947 	 * just return values that the watchdog task has already obtained from
5948 	 * the hardware.
5949 	 */
5950 	stats->multicast = vsi_stats->multicast;
5951 	stats->tx_errors = vsi_stats->tx_errors;
5952 	stats->tx_dropped = vsi_stats->tx_dropped;
5953 	stats->rx_errors = vsi_stats->rx_errors;
5954 	stats->rx_dropped = vsi_stats->rx_dropped;
5955 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5956 	stats->rx_length_errors = vsi_stats->rx_length_errors;
5957 }
5958 
5959 /**
5960  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5961  * @vsi: VSI having NAPI disabled
5962  */
5963 static void ice_napi_disable_all(struct ice_vsi *vsi)
5964 {
5965 	int q_idx;
5966 
5967 	if (!vsi->netdev)
5968 		return;
5969 
5970 	ice_for_each_q_vector(vsi, q_idx) {
5971 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5972 
5973 		if (q_vector->rx.ring || q_vector->tx.ring)
5974 			napi_disable(&q_vector->napi);
5975 
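		/* make sure any DIM work queued by net_dim() has finished
		 * before the interrupt vectors are torn down
		 */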
5976 		cancel_work_sync(&q_vector->tx.dim.work);
5977 		cancel_work_sync(&q_vector->rx.dim.work);
5978 	}
5979 }
5980 
5981 /**
5982  * ice_down - Shutdown the connection
5983  * @vsi: The VSI being stopped
5984  */
5985 int ice_down(struct ice_vsi *vsi)
5986 {
5987 	int i, tx_err, rx_err, link_err = 0;
5988 
	/* Caller of this function is expected to set the
	 * vsi->state ICE_VSI_DOWN bit
	 */
5992 	if (vsi->netdev) {
5993 		netif_carrier_off(vsi->netdev);
5994 		netif_tx_disable(vsi->netdev);
5995 	}
5996 
5997 	ice_vsi_dis_irq(vsi);
5998 
5999 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6000 	if (tx_err)
		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
6002 			   vsi->vsi_num, tx_err);
6003 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6004 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6005 		if (tx_err)
			netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
6007 				   vsi->vsi_num, tx_err);
6008 	}
6009 
6010 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6011 	if (rx_err)
		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
6013 			   vsi->vsi_num, rx_err);
6014 
6015 	ice_napi_disable_all(vsi);
6016 
6017 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6018 		link_err = ice_force_phys_link_state(vsi, false);
6019 		if (link_err)
6020 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6021 				   vsi->vsi_num, link_err);
6022 	}
6023 
6024 	ice_for_each_txq(vsi, i)
6025 		ice_clean_tx_ring(vsi->tx_rings[i]);
6026 
6027 	ice_for_each_rxq(vsi, i)
6028 		ice_clean_rx_ring(vsi->rx_rings[i]);
6029 
6030 	if (tx_err || rx_err || link_err) {
6031 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6032 			   vsi->vsi_num, vsi->vsw->sw_id);
6033 		return -EIO;
6034 	}
6035 
6036 	return 0;
6037 }
6038 
6039 /**
6040  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6041  * @vsi: VSI having resources allocated
6042  *
6043  * Return 0 on success, negative on failure
6044  */
6045 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6046 {
6047 	int i, err = 0;
6048 
6049 	if (!vsi->num_txq) {
6050 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6051 			vsi->vsi_num);
6052 		return -EINVAL;
6053 	}
6054 
6055 	ice_for_each_txq(vsi, i) {
6056 		struct ice_ring *ring = vsi->tx_rings[i];
6057 
6058 		if (!ring)
6059 			return -EINVAL;
6060 
6061 		ring->netdev = vsi->netdev;
6062 		err = ice_setup_tx_ring(ring);
6063 		if (err)
6064 			break;
6065 	}
6066 
6067 	return err;
6068 }
6069 
6070 /**
6071  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6072  * @vsi: VSI having resources allocated
6073  *
6074  * Return 0 on success, negative on failure
6075  */
6076 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6077 {
6078 	int i, err = 0;
6079 
6080 	if (!vsi->num_rxq) {
6081 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6082 			vsi->vsi_num);
6083 		return -EINVAL;
6084 	}
6085 
6086 	ice_for_each_rxq(vsi, i) {
6087 		struct ice_ring *ring = vsi->rx_rings[i];
6088 
6089 		if (!ring)
6090 			return -EINVAL;
6091 
6092 		ring->netdev = vsi->netdev;
6093 		err = ice_setup_rx_ring(ring);
6094 		if (err)
6095 			break;
6096 	}
6097 
6098 	return err;
6099 }
6100 
6101 /**
6102  * ice_vsi_open_ctrl - open control VSI for use
6103  * @vsi: the VSI to open
6104  *
6105  * Initialization of the Control VSI
6106  *
6107  * Returns 0 on success, negative value on error
6108  */
6109 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6110 {
6111 	char int_name[ICE_INT_NAME_STR_LEN];
6112 	struct ice_pf *pf = vsi->back;
6113 	struct device *dev;
6114 	int err;
6115 
6116 	dev = ice_pf_to_dev(pf);
6117 	/* allocate descriptors */
6118 	err = ice_vsi_setup_tx_rings(vsi);
6119 	if (err)
6120 		goto err_setup_tx;
6121 
6122 	err = ice_vsi_setup_rx_rings(vsi);
6123 	if (err)
6124 		goto err_setup_rx;
6125 
6126 	err = ice_vsi_cfg(vsi);
6127 	if (err)
6128 		goto err_setup_rx;
6129 
6130 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6131 		 dev_driver_string(dev), dev_name(dev));
6132 	err = ice_vsi_req_irq_msix(vsi, int_name);
6133 	if (err)
6134 		goto err_setup_rx;
6135 
6136 	ice_vsi_cfg_msix(vsi);
6137 
6138 	err = ice_vsi_start_all_rx_rings(vsi);
6139 	if (err)
6140 		goto err_up_complete;
6141 
6142 	clear_bit(ICE_VSI_DOWN, vsi->state);
6143 	ice_vsi_ena_irq(vsi);
6144 
6145 	return 0;
6146 
6147 err_up_complete:
6148 	ice_down(vsi);
6149 err_setup_rx:
6150 	ice_vsi_free_rx_rings(vsi);
6151 err_setup_tx:
6152 	ice_vsi_free_tx_rings(vsi);
6153 
6154 	return err;
6155 }
6156 
6157 /**
6158  * ice_vsi_open - Called when a network interface is made active
6159  * @vsi: the VSI to open
6160  *
6161  * Initialization of the VSI
6162  *
6163  * Returns 0 on success, negative value on error
6164  */
6165 static int ice_vsi_open(struct ice_vsi *vsi)
6166 {
6167 	char int_name[ICE_INT_NAME_STR_LEN];
6168 	struct ice_pf *pf = vsi->back;
6169 	int err;
6170 
6171 	/* allocate descriptors */
6172 	err = ice_vsi_setup_tx_rings(vsi);
6173 	if (err)
6174 		goto err_setup_tx;
6175 
6176 	err = ice_vsi_setup_rx_rings(vsi);
6177 	if (err)
6178 		goto err_setup_rx;
6179 
6180 	err = ice_vsi_cfg(vsi);
6181 	if (err)
6182 		goto err_setup_rx;
6183 
6184 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6185 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6186 	err = ice_vsi_req_irq_msix(vsi, int_name);
6187 	if (err)
6188 		goto err_setup_rx;
6189 
6190 	/* Notify the stack of the actual queue counts. */
6191 	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6192 	if (err)
6193 		goto err_set_qs;
6194 
6195 	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6196 	if (err)
6197 		goto err_set_qs;
6198 
6199 	err = ice_up_complete(vsi);
6200 	if (err)
6201 		goto err_up_complete;
6202 
6203 	return 0;
6204 
6205 err_up_complete:
6206 	ice_down(vsi);
6207 err_set_qs:
6208 	ice_vsi_free_irq(vsi);
6209 err_setup_rx:
6210 	ice_vsi_free_rx_rings(vsi);
6211 err_setup_tx:
6212 	ice_vsi_free_tx_rings(vsi);
6213 
6214 	return err;
6215 }
6216 
6217 /**
6218  * ice_vsi_release_all - Delete all VSIs
6219  * @pf: PF from which all VSIs are being removed
6220  */
6221 static void ice_vsi_release_all(struct ice_pf *pf)
6222 {
6223 	int err, i;
6224 
6225 	if (!pf->vsi)
6226 		return;
6227 
6228 	ice_for_each_vsi(pf, i) {
6229 		if (!pf->vsi[i])
6230 			continue;
6231 
6232 		err = ice_vsi_release(pf->vsi[i]);
6233 		if (err)
6234 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6235 				i, err, pf->vsi[i]->vsi_num);
6236 	}
6237 }
6238 
6239 /**
6240  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6241  * @pf: pointer to the PF instance
6242  * @type: VSI type to rebuild
6243  *
6244  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6245  */
6246 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6247 {
6248 	struct device *dev = ice_pf_to_dev(pf);
6249 	enum ice_status status;
6250 	int i, err;
6251 
6252 	ice_for_each_vsi(pf, i) {
6253 		struct ice_vsi *vsi = pf->vsi[i];
6254 
6255 		if (!vsi || vsi->type != type)
6256 			continue;
6257 
6258 		/* rebuild the VSI */
6259 		err = ice_vsi_rebuild(vsi, true);
6260 		if (err) {
6261 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6262 				err, vsi->idx, ice_vsi_type_str(type));
6263 			return err;
6264 		}
6265 
6266 		/* replay filters for the VSI */
6267 		status = ice_replay_vsi(&pf->hw, vsi->idx);
6268 		if (status) {
6269 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6270 				ice_stat_str(status), vsi->idx,
6271 				ice_vsi_type_str(type));
6272 			return -EIO;
6273 		}
6274 
6275 		/* Re-map HW VSI number, using VSI handle that has been
6276 		 * previously validated in ice_replay_vsi() call above
6277 		 */
6278 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6279 
6280 		/* enable the VSI */
6281 		err = ice_ena_vsi(vsi, false);
6282 		if (err) {
6283 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6284 				err, vsi->idx, ice_vsi_type_str(type));
6285 			return err;
6286 		}
6287 
6288 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6289 			 ice_vsi_type_str(type));
6290 	}
6291 
6292 	return 0;
6293 }
6294 
6295 /**
6296  * ice_update_pf_netdev_link - Update PF netdev link status
6297  * @pf: pointer to the PF instance
6298  */
6299 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6300 {
6301 	bool link_up;
6302 	int i;
6303 
6304 	ice_for_each_vsi(pf, i) {
6305 		struct ice_vsi *vsi = pf->vsi[i];
6306 
6307 		if (!vsi || vsi->type != ICE_VSI_PF)
6308 			return;
6309 
6310 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6311 		if (link_up) {
6312 			netif_carrier_on(pf->vsi[i]->netdev);
6313 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6314 		} else {
6315 			netif_carrier_off(pf->vsi[i]->netdev);
6316 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6317 		}
6318 	}
6319 }
6320 
6321 /**
6322  * ice_rebuild - rebuild after reset
6323  * @pf: PF to rebuild
6324  * @reset_type: type of reset
6325  *
 * Do not rebuild VF VSIs in this flow because that is already handled via
 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
 * PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we don't want
 * to reset/rebuild all the VF VSIs twice.
6330  */
6331 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6332 {
6333 	struct device *dev = ice_pf_to_dev(pf);
6334 	struct ice_hw *hw = &pf->hw;
6335 	enum ice_status ret;
6336 	int err;
6337 
6338 	if (test_bit(ICE_DOWN, pf->state))
6339 		goto clear_recovery;
6340 
6341 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6342 
6343 	ret = ice_init_all_ctrlq(hw);
6344 	if (ret) {
6345 		dev_err(dev, "control queues init failed %s\n",
6346 			ice_stat_str(ret));
6347 		goto err_init_ctrlq;
6348 	}
6349 
6350 	/* if DDP was previously loaded successfully */
6351 	if (!ice_is_safe_mode(pf)) {
6352 		/* reload the SW DB of filter tables */
6353 		if (reset_type == ICE_RESET_PFR)
6354 			ice_fill_blk_tbls(hw);
6355 		else
6356 			/* Reload DDP Package after CORER/GLOBR reset */
6357 			ice_load_pkg(NULL, pf);
6358 	}
6359 
6360 	ret = ice_clear_pf_cfg(hw);
6361 	if (ret) {
6362 		dev_err(dev, "clear PF configuration failed %s\n",
6363 			ice_stat_str(ret));
6364 		goto err_init_ctrlq;
6365 	}
6366 
6367 	if (pf->first_sw->dflt_vsi_ena)
6368 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6369 	/* clear the default VSI configuration if it exists */
6370 	pf->first_sw->dflt_vsi = NULL;
6371 	pf->first_sw->dflt_vsi_ena = false;
6372 
6373 	ice_clear_pxe_mode(hw);
6374 
6375 	ret = ice_init_nvm(hw);
6376 	if (ret) {
6377 		dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
6378 		goto err_init_ctrlq;
6379 	}
6380 
6381 	ret = ice_get_caps(hw);
6382 	if (ret) {
6383 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6384 		goto err_init_ctrlq;
6385 	}
6386 
6387 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6388 	if (ret) {
6389 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6390 		goto err_init_ctrlq;
6391 	}
6392 
6393 	err = ice_sched_init_port(hw->port_info);
6394 	if (err)
6395 		goto err_sched_init_port;
6396 
6397 	/* start misc vector */
6398 	err = ice_req_irq_msix_misc(pf);
6399 	if (err) {
6400 		dev_err(dev, "misc vector setup failed: %d\n", err);
6401 		goto err_sched_init_port;
6402 	}
6403 
6404 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6405 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6406 		if (!rd32(hw, PFQF_FD_SIZE)) {
6407 			u16 unused, guar, b_effort;
6408 
6409 			guar = hw->func_caps.fd_fltr_guar;
6410 			b_effort = hw->func_caps.fd_fltr_best_effort;
6411 
6412 			/* force guaranteed filter pool for PF */
6413 			ice_alloc_fd_guar_item(hw, &unused, guar);
6414 			/* force shared filter pool for PF */
6415 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6416 		}
6417 	}
6418 
6419 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6420 		ice_dcb_rebuild(pf);
6421 
	/* If the PF previously had enabled PTP, PTP init needs to happen before
	 * the VSI rebuild; otherwise the PTP link status events will fail.
	 */
6426 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6427 		ice_ptp_init(pf);
6428 
6429 	/* rebuild PF VSI */
6430 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6431 	if (err) {
6432 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6433 		goto err_vsi_rebuild;
6434 	}
6435 
6436 	/* If Flow Director is active */
6437 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6438 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6439 		if (err) {
6440 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6441 			goto err_vsi_rebuild;
6442 		}
6443 
6444 		/* replay HW Flow Director recipes */
6445 		if (hw->fdir_prof)
6446 			ice_fdir_replay_flows(hw);
6447 
6448 		/* replay Flow Director filters */
6449 		ice_fdir_replay_fltrs(pf);
6450 
6451 		ice_rebuild_arfs(pf);
6452 	}
6453 
6454 	ice_update_pf_netdev_link(pf);
6455 
6456 	/* tell the firmware we are up */
6457 	ret = ice_send_version(pf);
6458 	if (ret) {
6459 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6460 			ice_stat_str(ret));
6461 		goto err_vsi_rebuild;
6462 	}
6463 
6464 	ice_replay_post(hw);
6465 
6466 	/* if we get here, reset flow is successful */
6467 	clear_bit(ICE_RESET_FAILED, pf->state);
6468 
6469 	ice_plug_aux_dev(pf);
6470 	return;
6471 
6472 err_vsi_rebuild:
6473 err_sched_init_port:
6474 	ice_sched_cleanup_all(hw);
6475 err_init_ctrlq:
6476 	ice_shutdown_all_ctrlq(hw);
6477 	set_bit(ICE_RESET_FAILED, pf->state);
6478 clear_recovery:
6479 	/* set this bit in PF state to control service task scheduling */
6480 	set_bit(ICE_NEEDS_RESTART, pf->state);
6481 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6482 }
6483 
6484 /**
6485  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6486  * @vsi: Pointer to VSI structure
6487  */
6488 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6489 {
6490 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6491 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6492 	else
6493 		return ICE_RXBUF_3072;
6494 }
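
/* A note on the sizing above (based on the Rx buffer layout this driver
 * configures): on 4 KiB page systems without legacy-rx the frame gets a
 * 3 KiB buffer to itself when XDP is on, while otherwise it must share a
 * 2 KiB buffer with the headroom XDP reserves at the front of the buffer.
 */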
6495 
6496 /**
6497  * ice_change_mtu - NDO callback to change the MTU
6498  * @netdev: network interface device structure
6499  * @new_mtu: new value for maximum frame size
6500  *
6501  * Returns 0 on success, negative on failure
6502  */
6503 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6504 {
6505 	struct ice_netdev_priv *np = netdev_priv(netdev);
6506 	struct ice_vsi *vsi = np->vsi;
6507 	struct ice_pf *pf = vsi->back;
6508 	struct iidc_event *event;
6509 	u8 count = 0;
6510 	int err = 0;
6511 
6512 	if (new_mtu == (int)netdev->mtu) {
6513 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6514 		return 0;
6515 	}
6516 
6517 	if (ice_is_xdp_ena_vsi(vsi)) {
6518 		int frame_size = ice_max_xdp_frame_size(vsi);
6519 
6520 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6521 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6522 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6523 			return -EINVAL;
6524 		}
6525 	}
6526 
	/* if a reset is in progress, wait up to ~100-200 ms for it to complete */
6528 	do {
6529 		if (ice_is_reset_in_progress(pf->state)) {
6530 			count++;
6531 			usleep_range(1000, 2000);
6532 		} else {
6533 			break;
6534 		}
6535 
6536 	} while (count < 100);
6537 
6538 	if (count == 100) {
6539 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6540 		return -EBUSY;
6541 	}
6542 
6543 	event = kzalloc(sizeof(*event), GFP_KERNEL);
6544 	if (!event)
6545 		return -ENOMEM;
6546 
6547 	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6548 	ice_send_event_to_aux(pf, event);
6549 	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6550 
6551 	netdev->mtu = (unsigned int)new_mtu;
6552 
6553 	/* if VSI is up, bring it down and then back up */
6554 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6555 		err = ice_down(vsi);
6556 		if (err) {
6557 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6558 			goto event_after;
6559 		}
6560 
6561 		err = ice_up(vsi);
6562 		if (err) {
6563 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6564 			goto event_after;
6565 		}
6566 	}
6567 
6568 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6569 event_after:
6570 	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6571 	ice_send_event_to_aux(pf, event);
6572 	kfree(event);
6573 
6574 	return err;
6575 }
6576 
6577 /**
6578  * ice_eth_ioctl - Access the hwtstamp interface
6579  * @netdev: network interface device structure
6580  * @ifr: interface request data
6581  * @cmd: ioctl command
6582  */
6583 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6584 {
6585 	struct ice_netdev_priv *np = netdev_priv(netdev);
6586 	struct ice_pf *pf = np->vsi->back;
6587 
6588 	switch (cmd) {
6589 	case SIOCGHWTSTAMP:
6590 		return ice_ptp_get_ts_config(pf, ifr);
6591 	case SIOCSHWTSTAMP:
6592 		return ice_ptp_set_ts_config(pf, ifr);
6593 	default:
6594 		return -EOPNOTSUPP;
6595 	}
6596 }
6597 
6598 /**
6599  * ice_aq_str - convert AQ err code to a string
6600  * @aq_err: the AQ error code to convert
6601  */
6602 const char *ice_aq_str(enum ice_aq_err aq_err)
6603 {
6604 	switch (aq_err) {
6605 	case ICE_AQ_RC_OK:
6606 		return "OK";
6607 	case ICE_AQ_RC_EPERM:
6608 		return "ICE_AQ_RC_EPERM";
6609 	case ICE_AQ_RC_ENOENT:
6610 		return "ICE_AQ_RC_ENOENT";
6611 	case ICE_AQ_RC_ENOMEM:
6612 		return "ICE_AQ_RC_ENOMEM";
6613 	case ICE_AQ_RC_EBUSY:
6614 		return "ICE_AQ_RC_EBUSY";
6615 	case ICE_AQ_RC_EEXIST:
6616 		return "ICE_AQ_RC_EEXIST";
6617 	case ICE_AQ_RC_EINVAL:
6618 		return "ICE_AQ_RC_EINVAL";
6619 	case ICE_AQ_RC_ENOSPC:
6620 		return "ICE_AQ_RC_ENOSPC";
6621 	case ICE_AQ_RC_ENOSYS:
6622 		return "ICE_AQ_RC_ENOSYS";
6623 	case ICE_AQ_RC_EMODE:
6624 		return "ICE_AQ_RC_EMODE";
6625 	case ICE_AQ_RC_ENOSEC:
6626 		return "ICE_AQ_RC_ENOSEC";
6627 	case ICE_AQ_RC_EBADSIG:
6628 		return "ICE_AQ_RC_EBADSIG";
6629 	case ICE_AQ_RC_ESVN:
6630 		return "ICE_AQ_RC_ESVN";
6631 	case ICE_AQ_RC_EBADMAN:
6632 		return "ICE_AQ_RC_EBADMAN";
6633 	case ICE_AQ_RC_EBADBUF:
6634 		return "ICE_AQ_RC_EBADBUF";
6635 	}
6636 
6637 	return "ICE_AQ_RC_UNKNOWN";
6638 }
6639 
6640 /**
6641  * ice_stat_str - convert status err code to a string
6642  * @stat_err: the status error code to convert
6643  */
6644 const char *ice_stat_str(enum ice_status stat_err)
6645 {
6646 	switch (stat_err) {
6647 	case ICE_SUCCESS:
6648 		return "OK";
6649 	case ICE_ERR_PARAM:
6650 		return "ICE_ERR_PARAM";
6651 	case ICE_ERR_NOT_IMPL:
6652 		return "ICE_ERR_NOT_IMPL";
6653 	case ICE_ERR_NOT_READY:
6654 		return "ICE_ERR_NOT_READY";
6655 	case ICE_ERR_NOT_SUPPORTED:
6656 		return "ICE_ERR_NOT_SUPPORTED";
6657 	case ICE_ERR_BAD_PTR:
6658 		return "ICE_ERR_BAD_PTR";
6659 	case ICE_ERR_INVAL_SIZE:
6660 		return "ICE_ERR_INVAL_SIZE";
6661 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6662 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6663 	case ICE_ERR_RESET_FAILED:
6664 		return "ICE_ERR_RESET_FAILED";
6665 	case ICE_ERR_FW_API_VER:
6666 		return "ICE_ERR_FW_API_VER";
6667 	case ICE_ERR_NO_MEMORY:
6668 		return "ICE_ERR_NO_MEMORY";
6669 	case ICE_ERR_CFG:
6670 		return "ICE_ERR_CFG";
6671 	case ICE_ERR_OUT_OF_RANGE:
6672 		return "ICE_ERR_OUT_OF_RANGE";
6673 	case ICE_ERR_ALREADY_EXISTS:
6674 		return "ICE_ERR_ALREADY_EXISTS";
6675 	case ICE_ERR_NVM:
6676 		return "ICE_ERR_NVM";
6677 	case ICE_ERR_NVM_CHECKSUM:
6678 		return "ICE_ERR_NVM_CHECKSUM";
6679 	case ICE_ERR_BUF_TOO_SHORT:
6680 		return "ICE_ERR_BUF_TOO_SHORT";
6681 	case ICE_ERR_NVM_BLANK_MODE:
6682 		return "ICE_ERR_NVM_BLANK_MODE";
6683 	case ICE_ERR_IN_USE:
6684 		return "ICE_ERR_IN_USE";
6685 	case ICE_ERR_MAX_LIMIT:
6686 		return "ICE_ERR_MAX_LIMIT";
6687 	case ICE_ERR_RESET_ONGOING:
6688 		return "ICE_ERR_RESET_ONGOING";
6689 	case ICE_ERR_HW_TABLE:
6690 		return "ICE_ERR_HW_TABLE";
6691 	case ICE_ERR_DOES_NOT_EXIST:
6692 		return "ICE_ERR_DOES_NOT_EXIST";
6693 	case ICE_ERR_FW_DDP_MISMATCH:
6694 		return "ICE_ERR_FW_DDP_MISMATCH";
6695 	case ICE_ERR_AQ_ERROR:
6696 		return "ICE_ERR_AQ_ERROR";
6697 	case ICE_ERR_AQ_TIMEOUT:
6698 		return "ICE_ERR_AQ_TIMEOUT";
6699 	case ICE_ERR_AQ_FULL:
6700 		return "ICE_ERR_AQ_FULL";
6701 	case ICE_ERR_AQ_NO_WORK:
6702 		return "ICE_ERR_AQ_NO_WORK";
6703 	case ICE_ERR_AQ_EMPTY:
6704 		return "ICE_ERR_AQ_EMPTY";
6705 	case ICE_ERR_AQ_FW_CRITICAL:
6706 		return "ICE_ERR_AQ_FW_CRITICAL";
6707 	}
6708 
6709 	return "ICE_ERR_UNKNOWN";
6710 }
6711 
6712 /**
6713  * ice_set_rss_lut - Set RSS LUT
6714  * @vsi: Pointer to VSI structure
6715  * @lut: Lookup table
6716  * @lut_size: Lookup table size
6717  *
6718  * Returns 0 on success, negative on failure
6719  */
6720 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6721 {
6722 	struct ice_aq_get_set_rss_lut_params params = {};
6723 	struct ice_hw *hw = &vsi->back->hw;
6724 	enum ice_status status;
6725 
6726 	if (!lut)
6727 		return -EINVAL;
6728 
6729 	params.vsi_handle = vsi->idx;
6730 	params.lut_size = lut_size;
6731 	params.lut_type = vsi->rss_lut_type;
6732 	params.lut = lut;
6733 
6734 	status = ice_aq_set_rss_lut(hw, &params);
6735 	if (status) {
6736 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6737 			ice_stat_str(status),
6738 			ice_aq_str(hw->adminq.sq_last_status));
6739 		return -EIO;
6740 	}
6741 
6742 	return 0;
6743 }
6744 
6745 /**
6746  * ice_set_rss_key - Set RSS key
6747  * @vsi: Pointer to the VSI structure
6748  * @seed: RSS hash seed
6749  *
6750  * Returns 0 on success, negative on failure
6751  */
6752 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6753 {
6754 	struct ice_hw *hw = &vsi->back->hw;
6755 	enum ice_status status;
6756 
6757 	if (!seed)
6758 		return -EINVAL;
6759 
6760 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6761 	if (status) {
6762 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6763 			ice_stat_str(status),
6764 			ice_aq_str(hw->adminq.sq_last_status));
6765 		return -EIO;
6766 	}
6767 
6768 	return 0;
6769 }
6770 
6771 /**
6772  * ice_get_rss_lut - Get RSS LUT
6773  * @vsi: Pointer to VSI structure
6774  * @lut: Buffer to store the lookup table entries
6775  * @lut_size: Size of buffer to store the lookup table entries
6776  *
6777  * Returns 0 on success, negative on failure
6778  */
6779 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6780 {
6781 	struct ice_aq_get_set_rss_lut_params params = {};
6782 	struct ice_hw *hw = &vsi->back->hw;
6783 	enum ice_status status;
6784 
6785 	if (!lut)
6786 		return -EINVAL;
6787 
6788 	params.vsi_handle = vsi->idx;
6789 	params.lut_size = lut_size;
6790 	params.lut_type = vsi->rss_lut_type;
6791 	params.lut = lut;
6792 
6793 	status = ice_aq_get_rss_lut(hw, &params);
6794 	if (status) {
6795 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
6796 			ice_stat_str(status),
6797 			ice_aq_str(hw->adminq.sq_last_status));
6798 		return -EIO;
6799 	}
6800 
6801 	return 0;
6802 }
6803 
6804 /**
6805  * ice_get_rss_key - Get RSS key
6806  * @vsi: Pointer to VSI structure
6807  * @seed: Buffer to store the key in
6808  *
6809  * Returns 0 on success, negative on failure
6810  */
6811 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
6812 {
6813 	struct ice_hw *hw = &vsi->back->hw;
6814 	enum ice_status status;
6815 
6816 	if (!seed)
6817 		return -EINVAL;
6818 
6819 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6820 	if (status) {
6821 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
6822 			ice_stat_str(status),
6823 			ice_aq_str(hw->adminq.sq_last_status));
6824 		return -EIO;
6825 	}
6826 
6827 	return 0;
6828 }
6829 
6830 /**
6831  * ice_bridge_getlink - Get the hardware bridge mode
6832  * @skb: skb buff
6833  * @pid: process ID
6834  * @seq: RTNL message seq
6835  * @dev: the netdev being configured
6836  * @filter_mask: filter mask passed in
6837  * @nlflags: netlink flags passed in
6838  *
6839  * Return the bridge mode (VEB/VEPA)
6840  */
6841 static int
6842 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6843 		   struct net_device *dev, u32 filter_mask, int nlflags)
6844 {
6845 	struct ice_netdev_priv *np = netdev_priv(dev);
6846 	struct ice_vsi *vsi = np->vsi;
6847 	struct ice_pf *pf = vsi->back;
6848 	u16 bmode;
6849 
6850 	bmode = pf->first_sw->bridge_mode;
6851 
6852 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6853 				       filter_mask, NULL);
6854 }
6855 
6856 /**
6857  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6858  * @vsi: Pointer to VSI structure
6859  * @bmode: Hardware bridge mode (VEB/VEPA)
6860  *
6861  * Returns 0 on success, negative on failure
6862  */
6863 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6864 {
6865 	struct ice_aqc_vsi_props *vsi_props;
6866 	struct ice_hw *hw = &vsi->back->hw;
6867 	struct ice_vsi_ctx *ctxt;
6868 	enum ice_status status;
6869 	int ret = 0;
6870 
6871 	vsi_props = &vsi->info;
6872 
6873 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6874 	if (!ctxt)
6875 		return -ENOMEM;
6876 
6877 	ctxt->info = vsi->info;
6878 
6879 	if (bmode == BRIDGE_MODE_VEB)
6880 		/* change from VEPA to VEB mode */
6881 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6882 	else
6883 		/* change from VEB to VEPA mode */
6884 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6885 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6886 
6887 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6888 	if (status) {
6889 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6890 			bmode, ice_stat_str(status),
6891 			ice_aq_str(hw->adminq.sq_last_status));
6892 		ret = -EIO;
6893 		goto out;
6894 	}
6895 	/* Update sw flags for book keeping */
6896 	vsi_props->sw_flags = ctxt->info.sw_flags;
6897 
6898 out:
6899 	kfree(ctxt);
6900 	return ret;
6901 }
6902 
6903 /**
6904  * ice_bridge_setlink - Set the hardware bridge mode
6905  * @dev: the netdev being configured
6906  * @nlh: RTNL message
6907  * @flags: bridge setlink flags
6908  * @extack: netlink extended ack
6909  *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
 * not already set) for all VSIs connected to this switch, and also updates
 * the unicast switch filter rules for the corresponding switch of the netdev.
6914  */
6915 static int
6916 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6917 		   u16 __always_unused flags,
6918 		   struct netlink_ext_ack __always_unused *extack)
6919 {
6920 	struct ice_netdev_priv *np = netdev_priv(dev);
6921 	struct ice_pf *pf = np->vsi->back;
6922 	struct nlattr *attr, *br_spec;
6923 	struct ice_hw *hw = &pf->hw;
6924 	enum ice_status status;
6925 	struct ice_sw *pf_sw;
6926 	int rem, v, err = 0;
6927 
6928 	pf_sw = pf->first_sw;
6929 	/* find the attribute in the netlink message */
6930 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
6931 
6932 	nla_for_each_nested(attr, br_spec, rem) {
6933 		__u16 mode;
6934 
6935 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6936 			continue;
6937 		mode = nla_get_u16(attr);
6938 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6939 			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
6941 		if (mode == pf_sw->bridge_mode)
6942 			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of each VSI
		 */
6946 		ice_for_each_vsi(pf, v) {
6947 			if (!pf->vsi[v])
6948 				continue;
6949 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6950 			if (err)
6951 				return err;
6952 		}
6953 
6954 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6955 		/* Update the unicast switch filter rules for the corresponding
6956 		 * switch of the netdev
6957 		 */
6958 		status = ice_update_sw_rule_bridge_mode(hw);
6959 		if (status) {
6960 			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6961 				   mode, ice_stat_str(status),
6962 				   ice_aq_str(hw->adminq.sq_last_status));
6963 			/* revert hw->evb_veb */
6964 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6965 			return -EIO;
6966 		}
6967 
6968 		pf_sw->bridge_mode = mode;
6969 	}
6970 
6971 	return 0;
6972 }
6973 
6974 /**
6975  * ice_tx_timeout - Respond to a Tx Hang
6976  * @netdev: network interface device structure
6977  * @txqueue: Tx queue
6978  */
6979 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6980 {
6981 	struct ice_netdev_priv *np = netdev_priv(netdev);
6982 	struct ice_ring *tx_ring = NULL;
6983 	struct ice_vsi *vsi = np->vsi;
6984 	struct ice_pf *pf = vsi->back;
6985 	u32 i;
6986 
6987 	pf->tx_timeout_count++;
6988 
	/* Check if PFC is enabled for the TC to which the queue belongs.
	 * If yes then Tx timeout is not caused by a hung queue, no
	 * need to reset and rebuild
	 */
6993 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6994 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6995 			 txqueue);
6996 		return;
6997 	}
6998 
6999 	/* now that we have an index, find the tx_ring struct */
7000 	for (i = 0; i < vsi->num_txq; i++)
7001 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7002 			if (txqueue == vsi->tx_rings[i]->q_index) {
7003 				tx_ring = vsi->tx_rings[i];
7004 				break;
7005 			}
7006 
7007 	/* Reset recovery level if enough time has elapsed after last timeout.
7008 	 * Also ensure no new reset action happens before next timeout period.
7009 	 */
7010 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7011 		pf->tx_timeout_recovery_level = 1;
7012 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7013 				       netdev->watchdog_timeo)))
7014 		return;
7015 
7016 	if (tx_ring) {
7017 		struct ice_hw *hw = &pf->hw;
7018 		u32 head, val = 0;
7019 
7020 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7021 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7022 		/* Read interrupt register */
7023 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7024 
7025 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7026 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7027 			    head, tx_ring->next_to_use, val);
7028 	}
7029 
7030 	pf->tx_timeout_last_recovery = jiffies;
7031 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7032 		    pf->tx_timeout_recovery_level, txqueue);
7033 
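	/* escalate recovery on successive timeouts: PF reset first, then a
	 * core reset, then a global reset; beyond level 3 the device is
	 * treated as unrecoverable and taken down
	 */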
7034 	switch (pf->tx_timeout_recovery_level) {
7035 	case 1:
7036 		set_bit(ICE_PFR_REQ, pf->state);
7037 		break;
7038 	case 2:
7039 		set_bit(ICE_CORER_REQ, pf->state);
7040 		break;
7041 	case 3:
7042 		set_bit(ICE_GLOBR_REQ, pf->state);
7043 		break;
7044 	default:
7045 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7046 		set_bit(ICE_DOWN, pf->state);
7047 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7048 		set_bit(ICE_SERVICE_DIS, pf->state);
7049 		break;
7050 	}
7051 
	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be called directly except
 * by ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	enum ice_status status;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

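	/* Start with the carrier off; the link event handler raises it once
	 * the firmware reports link up.
	 */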
	netif_carrier_off(netdev);

	pi = vsi->port_info;
	status = ice_update_link_info(pi);
	if (status) {
		netdev_err(netdev, "Failed to get link info, error %s\n",
			   ice_stat_str(status));
		return -EIO;
	}

	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);

	/* Configure the PHY if media is present; otherwise turn the PHY off */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

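	/* ice_vsi_open() sets up the VSI's Tx/Rx resources, requests its
	 * interrupts and starts the queues.
	 */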
	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success, -EBUSY if a reset is in progress
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

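	/* ice_vsi_close() stops the queues, disables NAPI and releases the
	 * VSI's Tx/Rx resources and interrupts.
	 */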
	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO is
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is less than 64 bytes; if it is,
	 * drop GSO support for this frame.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

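	/* The Tx descriptor encodes header lengths in units of at least two
	 * bytes, so a header region that exceeds its descriptor field limit
	 * or has an odd byte length cannot be offloaded.
	 */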
	len = skb_network_header(skb) - skb->data;
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
			goto out_rm_features;

		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

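/* Minimal callback set used in Safe Mode, i.e. when the DDP package could
 * not be loaded and only basic Tx/Rx functionality is available.
 */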
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

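/* Full callback set used during normal operation, including SR-IOV VF
 * management, VLAN filtering and XDP/AF_XDP support.
 */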
static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_features_check = ice_features_check,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};