1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
17  * ice tracepoint functions. This must be done exactly once across the
18  * ice driver.
19  */
20 #define CREATE_TRACE_POINTS
21 #include "ice_trace.h"
22 
23 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
24 static const char ice_driver_string[] = DRV_SUMMARY;
25 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
26 
27 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
28 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
29 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
30 
31 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
32 MODULE_DESCRIPTION(DRV_SUMMARY);
33 MODULE_LICENSE("GPL v2");
34 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
35 
36 static int debug = -1;
37 module_param(debug, int, 0644);
38 #ifndef CONFIG_DYNAMIC_DEBUG
39 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
40 #else
41 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
42 #endif /* !CONFIG_DYNAMIC_DEBUG */
43 
44 static DEFINE_IDA(ice_aux_ida);
45 
46 static struct workqueue_struct *ice_wq;
47 static const struct net_device_ops ice_netdev_safe_mode_ops;
48 static const struct net_device_ops ice_netdev_ops;
49 static int ice_vsi_open(struct ice_vsi *vsi);
50 
51 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
52 
53 static void ice_vsi_release_all(struct ice_pf *pf);
54 
55 bool netif_is_ice(struct net_device *dev)
56 {
57 	return dev && (dev->netdev_ops == &ice_netdev_ops);
58 }
59 
60 /**
61  * ice_get_tx_pending - returns number of Tx descriptors not processed
62  * @ring: the ring of descriptors
63  */
64 static u16 ice_get_tx_pending(struct ice_ring *ring)
65 {
66 	u16 head, tail;
67 
68 	head = ring->next_to_clean;
69 	tail = ring->next_to_use;
70 
71 	if (head != tail)
72 		return (head < tail) ?
73 			tail - head : (tail + ring->count - head);
74 	return 0;
75 }
76 
77 /**
78  * ice_check_for_hang_subtask - check for and recover hung queues
79  * @pf: pointer to PF struct
80  */
81 static void ice_check_for_hang_subtask(struct ice_pf *pf)
82 {
83 	struct ice_vsi *vsi = NULL;
84 	struct ice_hw *hw;
85 	unsigned int i;
86 	int packets;
87 	u32 v;
88 
89 	ice_for_each_vsi(pf, v)
90 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
91 			vsi = pf->vsi[v];
92 			break;
93 		}
94 
95 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
96 		return;
97 
98 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
99 		return;
100 
101 	hw = &vsi->back->hw;
102 
103 	for (i = 0; i < vsi->num_txq; i++) {
104 		struct ice_ring *tx_ring = vsi->tx_rings[i];
105 
106 		if (tx_ring && tx_ring->desc) {
107 			/* If packet counter has not changed the queue is
108 			 * likely stalled, so force an interrupt for this
109 			 * queue.
110 			 *
111 			 * prev_pkt would be negative if there was no
112 			 * pending work.
113 			 */
114 			packets = tx_ring->stats.pkts & INT_MAX;
115 			if (tx_ring->tx_stats.prev_pkt == packets) {
116 				/* Trigger sw interrupt to revive the queue */
117 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
118 				continue;
119 			}
120 
121 			/* Memory barrier between read of packet count and call
122 			 * to ice_get_tx_pending()
123 			 */
124 			smp_rmb();
125 			tx_ring->tx_stats.prev_pkt =
126 			    ice_get_tx_pending(tx_ring) ? packets : -1;
127 		}
128 	}
129 }
130 
131 /**
132  * ice_init_mac_fltr - Set initial MAC filters
133  * @pf: board private structure
134  *
135  * Set initial set of MAC filters for PF VSI; configure filters for permanent
136  * address and broadcast address. If an error is encountered, netdevice will be
137  * unregistered.
138  */
139 static int ice_init_mac_fltr(struct ice_pf *pf)
140 {
141 	enum ice_status status;
142 	struct ice_vsi *vsi;
143 	u8 *perm_addr;
144 
145 	vsi = ice_get_main_vsi(pf);
146 	if (!vsi)
147 		return -EINVAL;
148 
149 	perm_addr = vsi->port_info->mac.perm_addr;
150 	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
151 	if (status)
152 		return -EIO;
153 
154 	return 0;
155 }
156 
157 /**
158  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
159  * @netdev: the net device on which the sync is happening
160  * @addr: MAC address to sync
161  *
162  * This is a callback function which is called by the in kernel device sync
163  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
164  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
165  * MAC filters from the hardware.
166  */
167 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
168 {
169 	struct ice_netdev_priv *np = netdev_priv(netdev);
170 	struct ice_vsi *vsi = np->vsi;
171 
172 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
173 				     ICE_FWD_TO_VSI))
174 		return -EINVAL;
175 
176 	return 0;
177 }
178 
179 /**
180  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
181  * @netdev: the net device on which the unsync is happening
182  * @addr: MAC address to unsync
183  *
184  * This is a callback function which is called by the in kernel device unsync
185  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
186  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
187  * delete the MAC filters from the hardware.
188  */
189 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
190 {
191 	struct ice_netdev_priv *np = netdev_priv(netdev);
192 	struct ice_vsi *vsi = np->vsi;
193 
194 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
195 				     ICE_FWD_TO_VSI))
196 		return -EINVAL;
197 
198 	return 0;
199 }
200 
201 /**
202  * ice_vsi_fltr_changed - check if filter state changed
203  * @vsi: VSI to be checked
204  *
205  * returns true if filter state has changed, false otherwise.
206  */
207 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
208 {
209 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
210 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
211 	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
212 }
213 
214 /**
215  * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
216  * @vsi: the VSI being configured
217  * @promisc_m: mask of promiscuous config bits
218  * @set_promisc: enable or disable promisc flag request
219  *
220  */
221 static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
222 {
223 	struct ice_hw *hw = &vsi->back->hw;
224 	enum ice_status status = 0;
225 
226 	if (vsi->type != ICE_VSI_PF)
227 		return 0;
228 
229 	if (vsi->num_vlan > 1) {
230 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
231 						  set_promisc);
232 	} else {
233 		if (set_promisc)
234 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
235 						     0);
236 		else
237 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
238 						       0);
239 	}
240 
241 	if (status)
242 		return -EIO;
243 
244 	return 0;
245 }
246 
247 /**
248  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
249  * @vsi: ptr to the VSI
250  *
251  * Push any outstanding VSI filter changes through the AdminQ.
252  */
253 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
254 {
255 	struct device *dev = ice_pf_to_dev(vsi->back);
256 	struct net_device *netdev = vsi->netdev;
257 	bool promisc_forced_on = false;
258 	struct ice_pf *pf = vsi->back;
259 	struct ice_hw *hw = &pf->hw;
260 	enum ice_status status = 0;
261 	u32 changed_flags = 0;
262 	u8 promisc_m;
263 	int err = 0;
264 
265 	if (!vsi->netdev)
266 		return -EINVAL;
267 
268 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
269 		usleep_range(1000, 2000);
270 
271 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
272 	vsi->current_netdev_flags = vsi->netdev->flags;
273 
274 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
275 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
276 
277 	if (ice_vsi_fltr_changed(vsi)) {
278 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
279 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
280 		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
281 
282 		/* grab the netdev's addr_list_lock */
283 		netif_addr_lock_bh(netdev);
284 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
285 			      ice_add_mac_to_unsync_list);
286 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
287 			      ice_add_mac_to_unsync_list);
288 		/* our temp lists are populated. release lock */
289 		netif_addr_unlock_bh(netdev);
290 	}
291 
292 	/* Remove MAC addresses in the unsync list */
293 	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
294 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
295 	if (status) {
296 		netdev_err(netdev, "Failed to delete MAC filters\n");
297 		/* if we failed because of alloc failures, just bail */
298 		if (status == ICE_ERR_NO_MEMORY) {
299 			err = -ENOMEM;
300 			goto out;
301 		}
302 	}
303 
304 	/* Add MAC addresses in the sync list */
305 	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
306 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
307 	/* If filter is added successfully or already exists, do not go into
308 	 * 'if' condition and report it as error. Instead continue processing
309 	 * rest of the function.
310 	 */
311 	if (status && status != ICE_ERR_ALREADY_EXISTS) {
312 		netdev_err(netdev, "Failed to add MAC filters\n");
313 		/* If there is no more space for new umac filters, VSI
314 		 * should go into promiscuous mode. There should be some
315 		 * space reserved for promiscuous filters.
316 		 */
317 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
318 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
319 				      vsi->state)) {
320 			promisc_forced_on = true;
321 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
322 				    vsi->vsi_num);
323 		} else {
324 			err = -EIO;
325 			goto out;
326 		}
327 	}
328 	/* check for changes in promiscuous modes */
329 	if (changed_flags & IFF_ALLMULTI) {
330 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
331 			if (vsi->num_vlan > 1)
332 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
333 			else
334 				promisc_m = ICE_MCAST_PROMISC_BITS;
335 
336 			err = ice_cfg_promisc(vsi, promisc_m, true);
337 			if (err) {
338 				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
339 					   vsi->vsi_num);
340 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
341 				goto out_promisc;
342 			}
343 		} else {
344 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
345 			if (vsi->num_vlan > 1)
346 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
347 			else
348 				promisc_m = ICE_MCAST_PROMISC_BITS;
349 
350 			err = ice_cfg_promisc(vsi, promisc_m, false);
351 			if (err) {
352 				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
353 					   vsi->vsi_num);
354 				vsi->current_netdev_flags |= IFF_ALLMULTI;
355 				goto out_promisc;
356 			}
357 		}
358 	}
359 
360 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
361 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
362 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
363 		if (vsi->current_netdev_flags & IFF_PROMISC) {
364 			/* Apply Rx filter rule to get traffic from wire */
365 			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
366 				err = ice_set_dflt_vsi(pf->first_sw, vsi);
367 				if (err && err != -EEXIST) {
368 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
369 						   err, vsi->vsi_num);
370 					vsi->current_netdev_flags &=
371 						~IFF_PROMISC;
372 					goto out_promisc;
373 				}
374 				ice_cfg_vlan_pruning(vsi, false, false);
375 			}
376 		} else {
377 			/* Clear Rx filter to remove traffic from wire */
378 			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
379 				err = ice_clear_dflt_vsi(pf->first_sw);
380 				if (err) {
381 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
382 						   err, vsi->vsi_num);
383 					vsi->current_netdev_flags |=
384 						IFF_PROMISC;
385 					goto out_promisc;
386 				}
387 				if (vsi->num_vlan > 1)
388 					ice_cfg_vlan_pruning(vsi, true, false);
389 			}
390 		}
391 	}
392 	goto exit;
393 
394 out_promisc:
395 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
396 	goto exit;
397 out:
398 	/* if something went wrong then set the changed flag so we try again */
399 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
400 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
401 exit:
402 	clear_bit(ICE_CFG_BUSY, vsi->state);
403 	return err;
404 }
405 
406 /**
407  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
408  * @pf: board private structure
409  */
410 static void ice_sync_fltr_subtask(struct ice_pf *pf)
411 {
412 	int v;
413 
414 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
415 		return;
416 
417 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
418 
419 	ice_for_each_vsi(pf, v)
420 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
421 		    ice_vsi_sync_fltr(pf->vsi[v])) {
422 			/* come back and try again later */
423 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
424 			break;
425 		}
426 }
427 
428 /**
429  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
430  * @pf: the PF
431  * @locked: is the rtnl_lock already held
432  */
433 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
434 {
435 	int node;
436 	int v;
437 
438 	ice_for_each_vsi(pf, v)
439 		if (pf->vsi[v])
440 			ice_dis_vsi(pf->vsi[v], locked);
441 
442 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
443 		pf->pf_agg_node[node].num_vsis = 0;
444 
445 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
446 		pf->vf_agg_node[node].num_vsis = 0;
447 }
448 
449 /**
450  * ice_prepare_for_reset - prep for the core to reset
451  * @pf: board private structure
452  *
453  * Inform or close all dependent features in prep for reset.
454  */
455 static void
456 ice_prepare_for_reset(struct ice_pf *pf)
457 {
458 	struct ice_hw *hw = &pf->hw;
459 	unsigned int i;
460 
461 	/* already prepared for reset */
462 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
463 		return;
464 
465 	ice_unplug_aux_dev(pf);
466 
467 	/* Notify VFs of impending reset */
468 	if (ice_check_sq_alive(hw, &hw->mailboxq))
469 		ice_vc_notify_reset(pf);
470 
471 	/* Disable VFs until reset is completed */
472 	ice_for_each_vf(pf, i)
473 		ice_set_vf_state_qs_dis(&pf->vf[i]);
474 
475 	/* clear SW filtering DB */
476 	ice_clear_hw_tbls(hw);
477 	/* disable the VSIs and their queues that are not already DOWN */
478 	ice_pf_dis_all_vsi(pf, false);
479 
480 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
481 		ice_ptp_release(pf);
482 
483 	if (hw->port_info)
484 		ice_sched_clear_port(hw->port_info);
485 
486 	ice_shutdown_all_ctrlq(hw);
487 
488 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
489 }
490 
491 /**
492  * ice_do_reset - Initiate one of many types of resets
493  * @pf: board private structure
494  * @reset_type: reset type requested
495  * before this function was called.
496  */
497 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
498 {
499 	struct device *dev = ice_pf_to_dev(pf);
500 	struct ice_hw *hw = &pf->hw;
501 
502 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
503 
504 	ice_prepare_for_reset(pf);
505 
506 	/* trigger the reset */
507 	if (ice_reset(hw, reset_type)) {
508 		dev_err(dev, "reset %d failed\n", reset_type);
509 		set_bit(ICE_RESET_FAILED, pf->state);
510 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
511 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
512 		clear_bit(ICE_PFR_REQ, pf->state);
513 		clear_bit(ICE_CORER_REQ, pf->state);
514 		clear_bit(ICE_GLOBR_REQ, pf->state);
515 		wake_up(&pf->reset_wait_queue);
516 		return;
517 	}
518 
519 	/* PFR is a bit of a special case because it doesn't result in an OICR
520 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
521 	 * associated state bits.
522 	 */
523 	if (reset_type == ICE_RESET_PFR) {
524 		pf->pfr_count++;
525 		ice_rebuild(pf, reset_type);
526 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
527 		clear_bit(ICE_PFR_REQ, pf->state);
528 		wake_up(&pf->reset_wait_queue);
529 		ice_reset_all_vfs(pf, true);
530 	}
531 }
532 
533 /**
534  * ice_reset_subtask - Set up for resetting the device and driver
535  * @pf: board private structure
536  */
537 static void ice_reset_subtask(struct ice_pf *pf)
538 {
539 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
540 
541 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
542 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
543 	 * of reset is pending and sets bits in pf->state indicating the reset
544 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
545 	 * prepare for pending reset if not already (for PF software-initiated
546 	 * global resets the software should already be prepared for it as
547 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
548 	 * by firmware or software on other PFs, that bit is not set so prepare
549 	 * for the reset now), poll for reset done, rebuild and return.
550 	 */
551 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
552 		/* Perform the largest reset requested */
553 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
554 			reset_type = ICE_RESET_CORER;
555 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
556 			reset_type = ICE_RESET_GLOBR;
557 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
558 			reset_type = ICE_RESET_EMPR;
559 		/* return if no valid reset type requested */
560 		if (reset_type == ICE_RESET_INVAL)
561 			return;
562 		ice_prepare_for_reset(pf);
563 
564 		/* make sure we are ready to rebuild */
565 		if (ice_check_reset(&pf->hw)) {
566 			set_bit(ICE_RESET_FAILED, pf->state);
567 		} else {
568 			/* done with reset. start rebuild */
569 			pf->hw.reset_ongoing = false;
570 			ice_rebuild(pf, reset_type);
571 			/* clear bit to resume normal operations, but
572 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
573 			 */
574 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
575 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
576 			clear_bit(ICE_PFR_REQ, pf->state);
577 			clear_bit(ICE_CORER_REQ, pf->state);
578 			clear_bit(ICE_GLOBR_REQ, pf->state);
579 			wake_up(&pf->reset_wait_queue);
580 			ice_reset_all_vfs(pf, true);
581 		}
582 
583 		return;
584 	}
585 
586 	/* No pending resets to finish processing. Check for new resets */
587 	if (test_bit(ICE_PFR_REQ, pf->state))
588 		reset_type = ICE_RESET_PFR;
589 	if (test_bit(ICE_CORER_REQ, pf->state))
590 		reset_type = ICE_RESET_CORER;
591 	if (test_bit(ICE_GLOBR_REQ, pf->state))
592 		reset_type = ICE_RESET_GLOBR;
593 	/* If no valid reset type requested just return */
594 	if (reset_type == ICE_RESET_INVAL)
595 		return;
596 
597 	/* reset if not already down or busy */
598 	if (!test_bit(ICE_DOWN, pf->state) &&
599 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
600 		ice_do_reset(pf, reset_type);
601 	}
602 }
603 
604 /**
605  * ice_print_topo_conflict - print topology conflict message
606  * @vsi: the VSI whose topology status is being checked
607  */
608 static void ice_print_topo_conflict(struct ice_vsi *vsi)
609 {
610 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
611 	case ICE_AQ_LINK_TOPO_CONFLICT:
612 	case ICE_AQ_LINK_MEDIA_CONFLICT:
613 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
614 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
615 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
616 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
617 		break;
618 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
619 		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
620 		break;
621 	default:
622 		break;
623 	}
624 }
625 
626 /**
627  * ice_print_link_msg - print link up or down message
628  * @vsi: the VSI whose link status is being queried
629  * @isup: boolean for if the link is now up or down
630  */
631 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
632 {
633 	struct ice_aqc_get_phy_caps_data *caps;
634 	const char *an_advertised;
635 	enum ice_status status;
636 	const char *fec_req;
637 	const char *speed;
638 	const char *fec;
639 	const char *fc;
640 	const char *an;
641 
642 	if (!vsi)
643 		return;
644 
645 	if (vsi->current_isup == isup)
646 		return;
647 
648 	vsi->current_isup = isup;
649 
650 	if (!isup) {
651 		netdev_info(vsi->netdev, "NIC Link is Down\n");
652 		return;
653 	}
654 
655 	switch (vsi->port_info->phy.link_info.link_speed) {
656 	case ICE_AQ_LINK_SPEED_100GB:
657 		speed = "100 G";
658 		break;
659 	case ICE_AQ_LINK_SPEED_50GB:
660 		speed = "50 G";
661 		break;
662 	case ICE_AQ_LINK_SPEED_40GB:
663 		speed = "40 G";
664 		break;
665 	case ICE_AQ_LINK_SPEED_25GB:
666 		speed = "25 G";
667 		break;
668 	case ICE_AQ_LINK_SPEED_20GB:
669 		speed = "20 G";
670 		break;
671 	case ICE_AQ_LINK_SPEED_10GB:
672 		speed = "10 G";
673 		break;
674 	case ICE_AQ_LINK_SPEED_5GB:
675 		speed = "5 G";
676 		break;
677 	case ICE_AQ_LINK_SPEED_2500MB:
678 		speed = "2.5 G";
679 		break;
680 	case ICE_AQ_LINK_SPEED_1000MB:
681 		speed = "1 G";
682 		break;
683 	case ICE_AQ_LINK_SPEED_100MB:
684 		speed = "100 M";
685 		break;
686 	default:
687 		speed = "Unknown ";
688 		break;
689 	}
690 
691 	switch (vsi->port_info->fc.current_mode) {
692 	case ICE_FC_FULL:
693 		fc = "Rx/Tx";
694 		break;
695 	case ICE_FC_TX_PAUSE:
696 		fc = "Tx";
697 		break;
698 	case ICE_FC_RX_PAUSE:
699 		fc = "Rx";
700 		break;
701 	case ICE_FC_NONE:
702 		fc = "None";
703 		break;
704 	default:
705 		fc = "Unknown";
706 		break;
707 	}
708 
709 	/* Get FEC mode based on negotiated link info */
710 	switch (vsi->port_info->phy.link_info.fec_info) {
711 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
712 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
713 		fec = "RS-FEC";
714 		break;
715 	case ICE_AQ_LINK_25G_KR_FEC_EN:
716 		fec = "FC-FEC/BASE-R";
717 		break;
718 	default:
719 		fec = "NONE";
720 		break;
721 	}
722 
723 	/* check if autoneg completed, might be false due to not supported */
724 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
725 		an = "True";
726 	else
727 		an = "False";
728 
729 	/* Get FEC mode requested based on PHY caps last SW configuration */
730 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
731 	if (!caps) {
732 		fec_req = "Unknown";
733 		an_advertised = "Unknown";
734 		goto done;
735 	}
736 
737 	status = ice_aq_get_phy_caps(vsi->port_info, false,
738 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
739 	if (status)
740 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
741 
742 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
743 
744 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
745 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
746 		fec_req = "RS-FEC";
747 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
748 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
749 		fec_req = "FC-FEC/BASE-R";
750 	else
751 		fec_req = "NONE";
752 
753 	kfree(caps);
754 
755 done:
756 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
757 		    speed, fec_req, fec, an_advertised, an, fc);
758 	ice_print_topo_conflict(vsi);
759 }
760 
761 /**
762  * ice_vsi_link_event - update the VSI's netdev
763  * @vsi: the VSI on which the link event occurred
764  * @link_up: whether or not the VSI needs to be set up or down
765  */
766 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
767 {
768 	if (!vsi)
769 		return;
770 
771 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
772 		return;
773 
774 	if (vsi->type == ICE_VSI_PF) {
775 		if (link_up == netif_carrier_ok(vsi->netdev))
776 			return;
777 
778 		if (link_up) {
779 			netif_carrier_on(vsi->netdev);
780 			netif_tx_wake_all_queues(vsi->netdev);
781 		} else {
782 			netif_carrier_off(vsi->netdev);
783 			netif_tx_stop_all_queues(vsi->netdev);
784 		}
785 	}
786 }
787 
788 /**
789  * ice_set_dflt_mib - send a default config MIB to the FW
790  * @pf: private PF struct
791  *
792  * This function sends a default configuration MIB to the FW.
793  *
794  * If this function errors out at any point, the driver is still able to
795  * function.  The main impact is that LFC may not operate as expected.
796  * Therefore an error state in this function should be treated with a DBG
797  * message and continue on with driver rebuild/reenable.
798  */
799 static void ice_set_dflt_mib(struct ice_pf *pf)
800 {
801 	struct device *dev = ice_pf_to_dev(pf);
802 	u8 mib_type, *buf, *lldpmib = NULL;
803 	u16 len, typelen, offset = 0;
804 	struct ice_lldp_org_tlv *tlv;
805 	struct ice_hw *hw = &pf->hw;
806 	u32 ouisubtype;
807 
808 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
809 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
810 	if (!lldpmib) {
811 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
812 			__func__);
813 		return;
814 	}
815 
816 	/* Add ETS CFG TLV */
817 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
818 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
819 		   ICE_IEEE_ETS_TLV_LEN);
820 	tlv->typelen = htons(typelen);
821 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
822 		      ICE_IEEE_SUBTYPE_ETS_CFG);
823 	tlv->ouisubtype = htonl(ouisubtype);
824 
825 	buf = tlv->tlvinfo;
826 	buf[0] = 0;
827 
828 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
829 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
830 	 * Octets 13 - 20 are TSA values - leave as zeros
831 	 */
832 	buf[5] = 0x64;
833 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
834 	offset += len + 2;
835 	tlv = (struct ice_lldp_org_tlv *)
836 		((char *)tlv + sizeof(tlv->typelen) + len);
837 
838 	/* Add ETS REC TLV */
839 	buf = tlv->tlvinfo;
840 	tlv->typelen = htons(typelen);
841 
842 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
843 		      ICE_IEEE_SUBTYPE_ETS_REC);
844 	tlv->ouisubtype = htonl(ouisubtype);
845 
846 	/* First octet of buf is reserved
847 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
848 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
849 	 * Octets 13 - 20 are TSA value - leave as zeros
850 	 */
851 	buf[5] = 0x64;
852 	offset += len + 2;
853 	tlv = (struct ice_lldp_org_tlv *)
854 		((char *)tlv + sizeof(tlv->typelen) + len);
855 
856 	/* Add PFC CFG TLV */
857 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
858 		   ICE_IEEE_PFC_TLV_LEN);
859 	tlv->typelen = htons(typelen);
860 
861 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
862 		      ICE_IEEE_SUBTYPE_PFC_CFG);
863 	tlv->ouisubtype = htonl(ouisubtype);
864 
865 	/* Octet 1 left as all zeros - PFC disabled */
866 	buf[0] = 0x08;
867 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
868 	offset += len + 2;
869 
870 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
871 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
872 
873 	kfree(lldpmib);
874 }
875 
876 /**
877  * ice_check_module_power
878  * @pf: pointer to PF struct
879  * @link_cfg_err: bitmap from the link info structure
880  *
881  * check module power level returned by a previous call to aq_get_link_info
882  * and print error messages if module power level is not supported
883  */
884 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
885 {
886 	/* if module power level is supported, clear the flag */
887 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
888 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
889 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
890 		return;
891 	}
892 
893 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
894 	 * above block didn't clear this bit, there's nothing to do
895 	 */
896 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
897 		return;
898 
899 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
900 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
901 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
902 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
903 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
904 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
905 	}
906 }
907 
908 /**
909  * ice_link_event - process the link event
910  * @pf: PF that the link event is associated with
911  * @pi: port_info for the port that the link event is associated with
912  * @link_up: true if the physical link is up and false if it is down
913  * @link_speed: current link speed received from the link event
914  *
915  * Returns 0 on success and negative on failure
916  */
917 static int
918 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
919 	       u16 link_speed)
920 {
921 	struct device *dev = ice_pf_to_dev(pf);
922 	struct ice_phy_info *phy_info;
923 	enum ice_status status;
924 	struct ice_vsi *vsi;
925 	u16 old_link_speed;
926 	bool old_link;
927 
928 	phy_info = &pi->phy;
929 	phy_info->link_info_old = phy_info->link_info;
930 
931 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
932 	old_link_speed = phy_info->link_info_old.link_speed;
933 
934 	/* update the link info structures and re-enable link events,
935 	 * don't bail on failure due to other book keeping needed
936 	 */
937 	status = ice_update_link_info(pi);
938 	if (status)
939 		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
940 			pi->lport, ice_stat_str(status),
941 			ice_aq_str(pi->hw->adminq.sq_last_status));
942 
943 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
944 
945 	/* Check if the link state is up after updating link info, and treat
946 	 * this event as an UP event since the link is actually UP now.
947 	 */
948 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
949 		link_up = true;
950 
951 	vsi = ice_get_main_vsi(pf);
952 	if (!vsi || !vsi->port_info)
953 		return -EINVAL;
954 
955 	/* turn off PHY if media was removed */
956 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
957 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
958 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
959 		ice_set_link(vsi, false);
960 	}
961 
962 	/* if the old link up/down and speed is the same as the new */
963 	if (link_up == old_link && link_speed == old_link_speed)
964 		return 0;
965 
966 	if (ice_is_dcb_active(pf)) {
967 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
968 			ice_dcb_rebuild(pf);
969 	} else {
970 		if (link_up)
971 			ice_set_dflt_mib(pf);
972 	}
973 	ice_vsi_link_event(vsi, link_up);
974 	ice_print_link_msg(vsi, link_up);
975 
976 	ice_vc_notify_link_state(pf);
977 
978 	return 0;
979 }
980 
981 /**
982  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
983  * @pf: board private structure
984  */
985 static void ice_watchdog_subtask(struct ice_pf *pf)
986 {
987 	int i;
988 
989 	/* if interface is down do nothing */
990 	if (test_bit(ICE_DOWN, pf->state) ||
991 	    test_bit(ICE_CFG_BUSY, pf->state))
992 		return;
993 
994 	/* make sure we don't do these things too often */
995 	if (time_before(jiffies,
996 			pf->serv_tmr_prev + pf->serv_tmr_period))
997 		return;
998 
999 	pf->serv_tmr_prev = jiffies;
1000 
1001 	/* Update the stats for active netdevs so the network stack
1002 	 * can look at updated numbers whenever it cares to
1003 	 */
1004 	ice_update_pf_stats(pf);
1005 	ice_for_each_vsi(pf, i)
1006 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1007 			ice_update_vsi_stats(pf->vsi[i]);
1008 }
1009 
1010 /**
1011  * ice_init_link_events - enable/initialize link events
1012  * @pi: pointer to the port_info instance
1013  *
1014  * Returns -EIO on failure, 0 on success
1015  */
1016 static int ice_init_link_events(struct ice_port_info *pi)
1017 {
1018 	u16 mask;
1019 
1020 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1021 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
1022 
1023 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1024 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1025 			pi->lport);
1026 		return -EIO;
1027 	}
1028 
1029 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1030 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1031 			pi->lport);
1032 		return -EIO;
1033 	}
1034 
1035 	return 0;
1036 }
1037 
1038 /**
1039  * ice_handle_link_event - handle link event via ARQ
1040  * @pf: PF that the link event is associated with
1041  * @event: event structure containing link status info
1042  */
1043 static int
1044 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1045 {
1046 	struct ice_aqc_get_link_status_data *link_data;
1047 	struct ice_port_info *port_info;
1048 	int status;
1049 
1050 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1051 	port_info = pf->hw.port_info;
1052 	if (!port_info)
1053 		return -EINVAL;
1054 
1055 	status = ice_link_event(pf, port_info,
1056 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1057 				le16_to_cpu(link_data->link_speed));
1058 	if (status)
1059 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1060 			status);
1061 
1062 	return status;
1063 }
1064 
1065 enum ice_aq_task_state {
1066 	ICE_AQ_TASK_WAITING = 0,
1067 	ICE_AQ_TASK_COMPLETE,
1068 	ICE_AQ_TASK_CANCELED,
1069 };
1070 
1071 struct ice_aq_task {
1072 	struct hlist_node entry;
1073 
1074 	u16 opcode;
1075 	struct ice_rq_event_info *event;
1076 	enum ice_aq_task_state state;
1077 };
1078 
1079 /**
1080  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1081  * @pf: pointer to the PF private structure
1082  * @opcode: the opcode to wait for
1083  * @timeout: how long to wait, in jiffies
1084  * @event: storage for the event info
1085  *
1086  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1087  * current thread will be put to sleep until the specified event occurs or
1088  * until the given timeout is reached.
1089  *
1090  * To obtain only the descriptor contents, pass an event without an allocated
1091  * msg_buf. If the complete data buffer is desired, allocate the
1092  * event->msg_buf with enough space ahead of time.
1093  *
1094  * Returns: zero on success, or a negative error code on failure.
1095  */
1096 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1097 			  struct ice_rq_event_info *event)
1098 {
1099 	struct device *dev = ice_pf_to_dev(pf);
1100 	struct ice_aq_task *task;
1101 	unsigned long start;
1102 	long ret;
1103 	int err;
1104 
1105 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1106 	if (!task)
1107 		return -ENOMEM;
1108 
1109 	INIT_HLIST_NODE(&task->entry);
1110 	task->opcode = opcode;
1111 	task->event = event;
1112 	task->state = ICE_AQ_TASK_WAITING;
1113 
1114 	spin_lock_bh(&pf->aq_wait_lock);
1115 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1116 	spin_unlock_bh(&pf->aq_wait_lock);
1117 
1118 	start = jiffies;
1119 
1120 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1121 					       timeout);
1122 	switch (task->state) {
1123 	case ICE_AQ_TASK_WAITING:
1124 		err = ret < 0 ? ret : -ETIMEDOUT;
1125 		break;
1126 	case ICE_AQ_TASK_CANCELED:
1127 		err = ret < 0 ? ret : -ECANCELED;
1128 		break;
1129 	case ICE_AQ_TASK_COMPLETE:
1130 		err = ret < 0 ? ret : 0;
1131 		break;
1132 	default:
1133 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1134 		err = -EINVAL;
1135 		break;
1136 	}
1137 
1138 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1139 		jiffies_to_msecs(jiffies - start),
1140 		jiffies_to_msecs(timeout),
1141 		opcode);
1142 
1143 	spin_lock_bh(&pf->aq_wait_lock);
1144 	hlist_del(&task->entry);
1145 	spin_unlock_bh(&pf->aq_wait_lock);
1146 	kfree(task);
1147 
1148 	return err;
1149 }
1150 
1151 /**
1152  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1153  * @pf: pointer to the PF private structure
1154  * @opcode: the opcode of the event
1155  * @event: the event to check
1156  *
1157  * Loops over the current list of pending threads waiting for an AdminQ event.
1158  * For each matching task, copy the contents of the event into the task
1159  * structure and wake up the thread.
1160  *
1161  * If multiple threads wait for the same opcode, they will all be woken up.
1162  *
1163  * Note that event->msg_buf will only be duplicated if the event has a buffer
1164  * with enough space already allocated. Otherwise, only the descriptor and
1165  * message length will be copied.
1166  *
1167  * Returns: true if an event was found, false otherwise
1168  */
1169 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1170 				struct ice_rq_event_info *event)
1171 {
1172 	struct ice_aq_task *task;
1173 	bool found = false;
1174 
1175 	spin_lock_bh(&pf->aq_wait_lock);
1176 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1177 		if (task->state || task->opcode != opcode)
1178 			continue;
1179 
1180 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1181 		task->event->msg_len = event->msg_len;
1182 
1183 		/* Only copy the data buffer if a destination was set */
1184 		if (task->event->msg_buf &&
1185 		    task->event->buf_len > event->buf_len) {
1186 			memcpy(task->event->msg_buf, event->msg_buf,
1187 			       event->buf_len);
1188 			task->event->buf_len = event->buf_len;
1189 		}
1190 
1191 		task->state = ICE_AQ_TASK_COMPLETE;
1192 		found = true;
1193 	}
1194 	spin_unlock_bh(&pf->aq_wait_lock);
1195 
1196 	if (found)
1197 		wake_up(&pf->aq_wait_queue);
1198 }
1199 
1200 /**
1201  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1202  * @pf: the PF private structure
1203  *
1204  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1205  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1206  */
1207 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1208 {
1209 	struct ice_aq_task *task;
1210 
1211 	spin_lock_bh(&pf->aq_wait_lock);
1212 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1213 		task->state = ICE_AQ_TASK_CANCELED;
1214 	spin_unlock_bh(&pf->aq_wait_lock);
1215 
1216 	wake_up(&pf->aq_wait_queue);
1217 }
1218 
1219 /**
1220  * __ice_clean_ctrlq - helper function to clean controlq rings
1221  * @pf: ptr to struct ice_pf
1222  * @q_type: specific Control queue type
1223  */
1224 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1225 {
1226 	struct device *dev = ice_pf_to_dev(pf);
1227 	struct ice_rq_event_info event;
1228 	struct ice_hw *hw = &pf->hw;
1229 	struct ice_ctl_q_info *cq;
1230 	u16 pending, i = 0;
1231 	const char *qtype;
1232 	u32 oldval, val;
1233 
1234 	/* Do not clean control queue if/when PF reset fails */
1235 	if (test_bit(ICE_RESET_FAILED, pf->state))
1236 		return 0;
1237 
1238 	switch (q_type) {
1239 	case ICE_CTL_Q_ADMIN:
1240 		cq = &hw->adminq;
1241 		qtype = "Admin";
1242 		break;
1243 	case ICE_CTL_Q_SB:
1244 		cq = &hw->sbq;
1245 		qtype = "Sideband";
1246 		break;
1247 	case ICE_CTL_Q_MAILBOX:
1248 		cq = &hw->mailboxq;
1249 		qtype = "Mailbox";
1250 		/* we are going to try to detect a malicious VF, so set the
1251 		 * state to begin detection
1252 		 */
1253 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1254 		break;
1255 	default:
1256 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1257 		return 0;
1258 	}
1259 
1260 	/* check for error indications - PF_xx_AxQLEN register layout for
1261 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1262 	 */
1263 	val = rd32(hw, cq->rq.len);
1264 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1265 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1266 		oldval = val;
1267 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1268 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1269 				qtype);
1270 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1271 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1272 				qtype);
1273 		}
1274 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1275 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1276 				qtype);
1277 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1278 			 PF_FW_ARQLEN_ARQCRIT_M);
1279 		if (oldval != val)
1280 			wr32(hw, cq->rq.len, val);
1281 	}
1282 
1283 	val = rd32(hw, cq->sq.len);
1284 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1285 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1286 		oldval = val;
1287 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1288 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1289 				qtype);
1290 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1291 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1292 				qtype);
1293 		}
1294 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1295 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1296 				qtype);
1297 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1298 			 PF_FW_ATQLEN_ATQCRIT_M);
1299 		if (oldval != val)
1300 			wr32(hw, cq->sq.len, val);
1301 	}
1302 
1303 	event.buf_len = cq->rq_buf_size;
1304 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1305 	if (!event.msg_buf)
1306 		return 0;
1307 
1308 	do {
1309 		enum ice_status ret;
1310 		u16 opcode;
1311 
1312 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1313 		if (ret == ICE_ERR_AQ_NO_WORK)
1314 			break;
1315 		if (ret) {
1316 			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1317 				ice_stat_str(ret));
1318 			break;
1319 		}
1320 
1321 		opcode = le16_to_cpu(event.desc.opcode);
1322 
1323 		/* Notify any thread that might be waiting for this event */
1324 		ice_aq_check_events(pf, opcode, &event);
1325 
1326 		switch (opcode) {
1327 		case ice_aqc_opc_get_link_status:
1328 			if (ice_handle_link_event(pf, &event))
1329 				dev_err(dev, "Could not handle link event\n");
1330 			break;
1331 		case ice_aqc_opc_event_lan_overflow:
1332 			ice_vf_lan_overflow_event(pf, &event);
1333 			break;
1334 		case ice_mbx_opc_send_msg_to_pf:
1335 			if (!ice_is_malicious_vf(pf, &event, i, pending))
1336 				ice_vc_process_vf_msg(pf, &event);
1337 			break;
1338 		case ice_aqc_opc_fw_logging:
1339 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1340 			break;
1341 		case ice_aqc_opc_lldp_set_mib_change:
1342 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1343 			break;
1344 		default:
1345 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1346 				qtype, opcode);
1347 			break;
1348 		}
1349 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1350 
1351 	kfree(event.msg_buf);
1352 
1353 	return pending && (i == ICE_DFLT_IRQ_WORK);
1354 }
1355 
1356 /**
1357  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1358  * @hw: pointer to hardware info
1359  * @cq: control queue information
1360  *
1361  * returns true if there are pending messages in a queue, false if there aren't
1362  */
1363 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1364 {
1365 	u16 ntu;
1366 
1367 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1368 	return cq->rq.next_to_clean != ntu;
1369 }
1370 
1371 /**
1372  * ice_clean_adminq_subtask - clean the AdminQ rings
1373  * @pf: board private structure
1374  */
1375 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1376 {
1377 	struct ice_hw *hw = &pf->hw;
1378 
1379 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1380 		return;
1381 
1382 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1383 		return;
1384 
1385 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1386 
1387 	/* There might be a situation where new messages arrive to a control
1388 	 * queue between processing the last message and clearing the
1389 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1390 	 * ice_ctrlq_pending) and process new messages if any.
1391 	 */
1392 	if (ice_ctrlq_pending(hw, &hw->adminq))
1393 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1394 
1395 	ice_flush(hw);
1396 }
1397 
1398 /**
1399  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1400  * @pf: board private structure
1401  */
1402 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1403 {
1404 	struct ice_hw *hw = &pf->hw;
1405 
1406 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1407 		return;
1408 
1409 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1410 		return;
1411 
1412 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1413 
1414 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1415 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1416 
1417 	ice_flush(hw);
1418 }
1419 
1420 /**
1421  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1422  * @pf: board private structure
1423  */
1424 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1425 {
1426 	struct ice_hw *hw = &pf->hw;
1427 
1428 	/* Nothing to do here if sideband queue is not supported */
1429 	if (!ice_is_sbq_supported(hw)) {
1430 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1431 		return;
1432 	}
1433 
1434 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1435 		return;
1436 
1437 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1438 		return;
1439 
1440 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1441 
1442 	if (ice_ctrlq_pending(hw, &hw->sbq))
1443 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1444 
1445 	ice_flush(hw);
1446 }
1447 
1448 /**
1449  * ice_service_task_schedule - schedule the service task to wake up
1450  * @pf: board private structure
1451  *
1452  * If not already scheduled, this puts the task into the work queue.
1453  */
1454 void ice_service_task_schedule(struct ice_pf *pf)
1455 {
1456 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1457 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1458 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1459 		queue_work(ice_wq, &pf->serv_task);
1460 }
1461 
1462 /**
1463  * ice_service_task_complete - finish up the service task
1464  * @pf: board private structure
1465  */
1466 static void ice_service_task_complete(struct ice_pf *pf)
1467 {
1468 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1469 
1470 	/* force memory (pf->state) to sync before next service task */
1471 	smp_mb__before_atomic();
1472 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1473 }
1474 
1475 /**
1476  * ice_service_task_stop - stop service task and cancel works
1477  * @pf: board private structure
1478  *
1479  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1480  * 1 otherwise.
1481  */
1482 static int ice_service_task_stop(struct ice_pf *pf)
1483 {
1484 	int ret;
1485 
1486 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1487 
1488 	if (pf->serv_tmr.function)
1489 		del_timer_sync(&pf->serv_tmr);
1490 	if (pf->serv_task.func)
1491 		cancel_work_sync(&pf->serv_task);
1492 
1493 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1494 	return ret;
1495 }
1496 
1497 /**
1498  * ice_service_task_restart - restart service task and schedule works
1499  * @pf: board private structure
1500  *
1501  * This function is needed for suspend and resume works (e.g WoL scenario)
1502  */
1503 static void ice_service_task_restart(struct ice_pf *pf)
1504 {
1505 	clear_bit(ICE_SERVICE_DIS, pf->state);
1506 	ice_service_task_schedule(pf);
1507 }
1508 
1509 /**
1510  * ice_service_timer - timer callback to schedule service task
1511  * @t: pointer to timer_list
1512  */
1513 static void ice_service_timer(struct timer_list *t)
1514 {
1515 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1516 
1517 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1518 	ice_service_task_schedule(pf);
1519 }
1520 
1521 /**
1522  * ice_handle_mdd_event - handle malicious driver detect event
1523  * @pf: pointer to the PF structure
1524  *
1525  * Called from service task. OICR interrupt handler indicates MDD event.
1526  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1527  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1528  * disable the queue, the PF can be configured to reset the VF using ethtool
1529  * private flag mdd-auto-reset-vf.
1530  */
1531 static void ice_handle_mdd_event(struct ice_pf *pf)
1532 {
1533 	struct device *dev = ice_pf_to_dev(pf);
1534 	struct ice_hw *hw = &pf->hw;
1535 	unsigned int i;
1536 	u32 reg;
1537 
1538 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1539 		/* Since the VF MDD event logging is rate limited, check if
1540 		 * there are pending MDD events.
1541 		 */
1542 		ice_print_vfs_mdd_events(pf);
1543 		return;
1544 	}
1545 
1546 	/* find what triggered an MDD event */
1547 	reg = rd32(hw, GL_MDET_TX_PQM);
1548 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1549 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1550 				GL_MDET_TX_PQM_PF_NUM_S;
1551 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1552 				GL_MDET_TX_PQM_VF_NUM_S;
1553 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1554 				GL_MDET_TX_PQM_MAL_TYPE_S;
1555 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1556 				GL_MDET_TX_PQM_QNUM_S);
1557 
1558 		if (netif_msg_tx_err(pf))
1559 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1560 				 event, queue, pf_num, vf_num);
1561 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1562 	}
1563 
1564 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1565 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1566 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1567 				GL_MDET_TX_TCLAN_PF_NUM_S;
1568 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1569 				GL_MDET_TX_TCLAN_VF_NUM_S;
1570 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1571 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1572 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1573 				GL_MDET_TX_TCLAN_QNUM_S);
1574 
1575 		if (netif_msg_tx_err(pf))
1576 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1577 				 event, queue, pf_num, vf_num);
1578 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1579 	}
1580 
1581 	reg = rd32(hw, GL_MDET_RX);
1582 	if (reg & GL_MDET_RX_VALID_M) {
1583 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1584 				GL_MDET_RX_PF_NUM_S;
1585 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1586 				GL_MDET_RX_VF_NUM_S;
1587 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1588 				GL_MDET_RX_MAL_TYPE_S;
1589 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1590 				GL_MDET_RX_QNUM_S);
1591 
1592 		if (netif_msg_rx_err(pf))
1593 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1594 				 event, queue, pf_num, vf_num);
1595 		wr32(hw, GL_MDET_RX, 0xffffffff);
1596 	}
1597 
1598 	/* check to see if this PF caused an MDD event */
1599 	reg = rd32(hw, PF_MDET_TX_PQM);
1600 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1601 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1602 		if (netif_msg_tx_err(pf))
1603 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1604 	}
1605 
1606 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1607 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1608 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1609 		if (netif_msg_tx_err(pf))
1610 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1611 	}
1612 
1613 	reg = rd32(hw, PF_MDET_RX);
1614 	if (reg & PF_MDET_RX_VALID_M) {
1615 		wr32(hw, PF_MDET_RX, 0xFFFF);
1616 		if (netif_msg_rx_err(pf))
1617 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1618 	}
1619 
1620 	/* Check to see if one of the VFs caused an MDD event, and then
1621 	 * increment counters and set print pending
1622 	 */
1623 	ice_for_each_vf(pf, i) {
1624 		struct ice_vf *vf = &pf->vf[i];
1625 
1626 		reg = rd32(hw, VP_MDET_TX_PQM(i));
1627 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1628 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1629 			vf->mdd_tx_events.count++;
1630 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1631 			if (netif_msg_tx_err(pf))
1632 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1633 					 i);
1634 		}
1635 
1636 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1637 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1638 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1639 			vf->mdd_tx_events.count++;
1640 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1641 			if (netif_msg_tx_err(pf))
1642 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1643 					 i);
1644 		}
1645 
1646 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1647 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1648 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1649 			vf->mdd_tx_events.count++;
1650 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1651 			if (netif_msg_tx_err(pf))
1652 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1653 					 i);
1654 		}
1655 
1656 		reg = rd32(hw, VP_MDET_RX(i));
1657 		if (reg & VP_MDET_RX_VALID_M) {
1658 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1659 			vf->mdd_rx_events.count++;
1660 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1661 			if (netif_msg_rx_err(pf))
1662 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1663 					 i);
1664 
1665 			/* Since the queue is disabled on VF Rx MDD events, the
1666 			 * PF can be configured to reset the VF through ethtool
1667 			 * private flag mdd-auto-reset-vf.
1668 			 */
1669 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1670 				/* VF MDD event counters will be cleared by
1671 				 * reset, so print the event prior to reset.
1672 				 */
1673 				ice_print_vf_rx_mdd_event(vf);
1674 				ice_reset_vf(&pf->vf[i], false);
1675 			}
1676 		}
1677 	}
1678 
1679 	ice_print_vfs_mdd_events(pf);
1680 }
1681 
1682 /**
1683  * ice_force_phys_link_state - Force the physical link state
1684  * @vsi: VSI to force the physical link state to up/down
1685  * @link_up: true/false indicates to set the physical link to up/down
1686  *
1687  * Force the physical link state by getting the current PHY capabilities from
1688  * hardware and setting the PHY config based on the determined capabilities. If
1689  * link changes a link event will be triggered because both the Enable Automatic
1690  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1691  *
1692  * Returns 0 on success, negative on failure
1693  */
1694 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1695 {
1696 	struct ice_aqc_get_phy_caps_data *pcaps;
1697 	struct ice_aqc_set_phy_cfg_data *cfg;
1698 	struct ice_port_info *pi;
1699 	struct device *dev;
1700 	int retcode;
1701 
1702 	if (!vsi || !vsi->port_info || !vsi->back)
1703 		return -EINVAL;
1704 	if (vsi->type != ICE_VSI_PF)
1705 		return 0;
1706 
1707 	dev = ice_pf_to_dev(vsi->back);
1708 
1709 	pi = vsi->port_info;
1710 
1711 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1712 	if (!pcaps)
1713 		return -ENOMEM;
1714 
1715 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1716 				      NULL);
1717 	if (retcode) {
1718 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1719 			vsi->vsi_num, retcode);
1720 		retcode = -EIO;
1721 		goto out;
1722 	}
1723 
1724 	/* No change in link */
1725 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1726 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1727 		goto out;
1728 
1729 	/* Use the current user PHY configuration. The current user PHY
1730 	 * configuration is initialized during probe from PHY capabilities
1731 	 * software mode, and updated on set PHY configuration.
1732 	 */
1733 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1734 	if (!cfg) {
1735 		retcode = -ENOMEM;
1736 		goto out;
1737 	}
1738 
1739 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1740 	if (link_up)
1741 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1742 	else
1743 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1744 
1745 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1746 	if (retcode) {
1747 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1748 			vsi->vsi_num, retcode);
1749 		retcode = -EIO;
1750 	}
1751 
1752 	kfree(cfg);
1753 out:
1754 	kfree(pcaps);
1755 	return retcode;
1756 }
1757 
1758 /**
1759  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1760  * @pi: port info structure
1761  *
1762  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1763  */
1764 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1765 {
1766 	struct ice_aqc_get_phy_caps_data *pcaps;
1767 	struct ice_pf *pf = pi->hw->back;
1768 	enum ice_status status;
1769 	int err = 0;
1770 
1771 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1772 	if (!pcaps)
1773 		return -ENOMEM;
1774 
1775 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1776 				     NULL);
1777 
1778 	if (status) {
1779 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1780 		err = -EIO;
1781 		goto out;
1782 	}
1783 
1784 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1785 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1786 
1787 out:
1788 	kfree(pcaps);
1789 	return err;
1790 }
1791 
1792 /**
1793  * ice_init_link_dflt_override - Initialize link default override
1794  * @pi: port info structure
1795  *
1796  * Initialize link default override and PHY total port shutdown during probe
1797  */
1798 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1799 {
1800 	struct ice_link_default_override_tlv *ldo;
1801 	struct ice_pf *pf = pi->hw->back;
1802 
1803 	ldo = &pf->link_dflt_override;
1804 	if (ice_get_link_default_override(ldo, pi))
1805 		return;
1806 
1807 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1808 		return;
1809 
1810 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1811 	 * ethtool private flag) for ports with Port Disable bit set.
1812 	 */
1813 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1814 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1815 }
1816 
1817 /**
1818  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1819  * @pi: port info structure
1820  *
1821  * If default override is enabled, initialize the user PHY cfg speed and FEC
1822  * settings using the default override mask from the NVM.
1823  *
1824  * The PHY should only be configured with the default override settings the
1825  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1826  * is used to indicate that the user PHY cfg default override is initialized
1827  * and the PHY has not been configured with the default override settings. The
1828  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1829  * configured.
1830  *
1831  * This function should be called only if the FW doesn't support default
1832  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1833  */
1834 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1835 {
1836 	struct ice_link_default_override_tlv *ldo;
1837 	struct ice_aqc_set_phy_cfg_data *cfg;
1838 	struct ice_phy_info *phy = &pi->phy;
1839 	struct ice_pf *pf = pi->hw->back;
1840 
1841 	ldo = &pf->link_dflt_override;
1842 
1843 	/* If link default override is enabled, use to mask NVM PHY capabilities
1844 	 * for speed and FEC default configuration.
1845 	 */
1846 	cfg = &phy->curr_user_phy_cfg;
1847 
1848 	if (ldo->phy_type_low || ldo->phy_type_high) {
1849 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1850 				    cpu_to_le64(ldo->phy_type_low);
1851 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1852 				     cpu_to_le64(ldo->phy_type_high);
1853 	}
1854 	cfg->link_fec_opt = ldo->fec_options;
1855 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1856 
1857 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1858 }
1859 
1860 /**
1861  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1862  * @pi: port info structure
1863  *
1864  * Initialize the current user PHY configuration, speed, FEC, and FC requested
1865  * mode to default. The PHY defaults are from get PHY capabilities topology
1866  * with media so call when media is first available. An error is returned if
1867  * called when media is not available. The PHY initialization completed state is
1868  * set here.
1869  *
1870  * These configurations are used when setting PHY
1871  * configuration. The user PHY configuration is updated on set PHY
1872  * configuration. Returns 0 on success, negative on failure
1873  */
1874 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1875 {
1876 	struct ice_aqc_get_phy_caps_data *pcaps;
1877 	struct ice_phy_info *phy = &pi->phy;
1878 	struct ice_pf *pf = pi->hw->back;
1879 	enum ice_status status;
1880 	int err = 0;
1881 
1882 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1883 		return -EIO;
1884 
1885 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1886 	if (!pcaps)
1887 		return -ENOMEM;
1888 
1889 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1890 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1891 					     pcaps, NULL);
1892 	else
1893 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1894 					     pcaps, NULL);
1895 	if (status) {
1896 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1897 		err = -EIO;
1898 		goto err_out;
1899 	}
1900 
1901 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1902 
1903 	/* check if lenient mode is supported and enabled */
1904 	if (ice_fw_supports_link_override(pi->hw) &&
1905 	    !(pcaps->module_compliance_enforcement &
1906 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1907 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1908 
1909 		/* if the FW supports default PHY configuration mode, then the driver
1910 		 * does not have to apply link override settings. If not,
1911 		 * initialize user PHY configuration with link override values
1912 		 */
1913 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1914 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1915 			ice_init_phy_cfg_dflt_override(pi);
1916 			goto out;
1917 		}
1918 	}
1919 
1920 	/* if link default override is not enabled, set user flow control and
1921 	 * FEC settings based on what get_phy_caps returned
1922 	 */
1923 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1924 						      pcaps->link_fec_options);
1925 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1926 
1927 out:
1928 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1929 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1930 err_out:
1931 	kfree(pcaps);
1932 	return err;
1933 }
1934 
1935 /**
1936  * ice_configure_phy - configure PHY
1937  * @vsi: VSI of PHY
1938  *
1939  * Set the PHY configuration. If the current PHY configuration is the same as
1940  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1941  * configure the based get PHY capabilities for topology with media.
1942  */
1943 static int ice_configure_phy(struct ice_vsi *vsi)
1944 {
1945 	struct device *dev = ice_pf_to_dev(vsi->back);
1946 	struct ice_port_info *pi = vsi->port_info;
1947 	struct ice_aqc_get_phy_caps_data *pcaps;
1948 	struct ice_aqc_set_phy_cfg_data *cfg;
1949 	struct ice_phy_info *phy = &pi->phy;
1950 	struct ice_pf *pf = vsi->back;
1951 	enum ice_status status;
1952 	int err = 0;
1953 
1954 	/* Ensure we have media as we cannot configure a medialess port */
1955 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1956 		return -EPERM;
1957 
1958 	ice_print_topo_conflict(vsi);
1959 
1960 	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1961 		return -EPERM;
1962 
1963 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1964 		return ice_force_phys_link_state(vsi, true);
1965 
1966 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1967 	if (!pcaps)
1968 		return -ENOMEM;
1969 
1970 	/* Get current PHY config */
1971 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1972 				     NULL);
1973 	if (status) {
1974 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1975 			vsi->vsi_num, ice_stat_str(status));
1976 		err = -EIO;
1977 		goto done;
1978 	}
1979 
1980 	/* If PHY enable link is configured and configuration has not changed,
1981 	 * there's nothing to do
1982 	 */
1983 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1984 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1985 		goto done;
1986 
1987 	/* Use PHY topology as baseline for configuration */
1988 	memset(pcaps, 0, sizeof(*pcaps));
1989 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1990 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1991 					     pcaps, NULL);
1992 	else
1993 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1994 					     pcaps, NULL);
1995 	if (status) {
1996 		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
1997 			vsi->vsi_num, ice_stat_str(status));
1998 		err = -EIO;
1999 		goto done;
2000 	}
2001 
2002 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2003 	if (!cfg) {
2004 		err = -ENOMEM;
2005 		goto done;
2006 	}
2007 
2008 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2009 
2010 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2011 	 * ice_init_phy_user_cfg_ldo.
2012 	 */
2013 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2014 			       vsi->back->state)) {
2015 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2016 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2017 	} else {
2018 		u64 phy_low = 0, phy_high = 0;
2019 
2020 		ice_update_phy_type(&phy_low, &phy_high,
2021 				    pi->phy.curr_user_speed_req);
2022 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2023 		cfg->phy_type_high = pcaps->phy_type_high &
2024 				     cpu_to_le64(phy_high);
2025 	}
2026 
2027 	/* Can't provide what was requested; use PHY capabilities */
2028 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2029 		cfg->phy_type_low = pcaps->phy_type_low;
2030 		cfg->phy_type_high = pcaps->phy_type_high;
2031 	}
2032 
2033 	/* FEC */
2034 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2035 
2036 	/* Can't provide what was requested; use PHY capabilities */
2037 	if (cfg->link_fec_opt !=
2038 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2039 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2040 		cfg->link_fec_opt = pcaps->link_fec_options;
2041 	}
2042 
2043 	/* Flow Control - always supported; no need to check against
2044 	 * capabilities
2045 	 */
2046 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2047 
2048 	/* Enable link and link update */
2049 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2050 
2051 	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2052 	if (status) {
2053 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
2054 			vsi->vsi_num, ice_stat_str(status));
2055 		err = -EIO;
2056 	}
2057 
2058 	kfree(cfg);
2059 done:
2060 	kfree(pcaps);
2061 	return err;
2062 }
2063 
2064 /**
2065  * ice_check_media_subtask - Check for media
2066  * @pf: pointer to PF struct
2067  *
2068  * If media is available, then initialize PHY user configuration if it is not
2069  * been, and configure the PHY if the interface is up.
2070  */
2071 static void ice_check_media_subtask(struct ice_pf *pf)
2072 {
2073 	struct ice_port_info *pi;
2074 	struct ice_vsi *vsi;
2075 	int err;
2076 
2077 	/* No need to check for media if it's already present */
2078 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2079 		return;
2080 
2081 	vsi = ice_get_main_vsi(pf);
2082 	if (!vsi)
2083 		return;
2084 
2085 	/* Refresh link info and check if media is present */
2086 	pi = vsi->port_info;
2087 	err = ice_update_link_info(pi);
2088 	if (err)
2089 		return;
2090 
2091 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
2092 
2093 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2094 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2095 			ice_init_phy_user_cfg(pi);
2096 
2097 		/* PHY settings are reset on media insertion, reconfigure
2098 		 * PHY to preserve settings.
2099 		 */
2100 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2101 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2102 			return;
2103 
2104 		err = ice_configure_phy(vsi);
2105 		if (!err)
2106 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2107 
2108 		/* A Link Status Event will be generated; the event handler
2109 		 * will complete bringing the interface up
2110 		 */
2111 	}
2112 }
2113 
2114 /**
2115  * ice_service_task - manage and run subtasks
2116  * @work: pointer to work_struct contained by the PF struct
2117  */
2118 static void ice_service_task(struct work_struct *work)
2119 {
2120 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2121 	unsigned long start_time = jiffies;
2122 
2123 	/* subtasks */
2124 
2125 	/* process reset requests first */
2126 	ice_reset_subtask(pf);
2127 
2128 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2129 	if (ice_is_reset_in_progress(pf->state) ||
2130 	    test_bit(ICE_SUSPENDED, pf->state) ||
2131 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2132 		ice_service_task_complete(pf);
2133 		return;
2134 	}
2135 
2136 	ice_clean_adminq_subtask(pf);
2137 	ice_check_media_subtask(pf);
2138 	ice_check_for_hang_subtask(pf);
2139 	ice_sync_fltr_subtask(pf);
2140 	ice_handle_mdd_event(pf);
2141 	ice_watchdog_subtask(pf);
2142 
2143 	if (ice_is_safe_mode(pf)) {
2144 		ice_service_task_complete(pf);
2145 		return;
2146 	}
2147 
2148 	ice_process_vflr_event(pf);
2149 	ice_clean_mailboxq_subtask(pf);
2150 	ice_clean_sbq_subtask(pf);
2151 	ice_sync_arfs_fltrs(pf);
2152 	ice_flush_fdir_ctx(pf);
2153 
2154 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2155 	ice_service_task_complete(pf);
2156 
2157 	/* If the tasks have taken longer than one service timer period
2158 	 * or there is more work to be done, reset the service timer to
2159 	 * schedule the service task now.
2160 	 */
2161 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2162 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2163 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2164 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2165 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2166 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2167 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2168 		mod_timer(&pf->serv_tmr, jiffies);
2169 }
2170 
2171 /**
2172  * ice_set_ctrlq_len - helper function to set controlq length
2173  * @hw: pointer to the HW instance
2174  */
2175 static void ice_set_ctrlq_len(struct ice_hw *hw)
2176 {
2177 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2178 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2179 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2180 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2181 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2182 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2183 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2184 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2185 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2186 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2187 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2188 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2189 }
2190 
2191 /**
2192  * ice_schedule_reset - schedule a reset
2193  * @pf: board private structure
2194  * @reset: reset being requested
2195  */
2196 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2197 {
2198 	struct device *dev = ice_pf_to_dev(pf);
2199 
2200 	/* bail out if earlier reset has failed */
2201 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2202 		dev_dbg(dev, "earlier reset has failed\n");
2203 		return -EIO;
2204 	}
2205 	/* bail if reset/recovery already in progress */
2206 	if (ice_is_reset_in_progress(pf->state)) {
2207 		dev_dbg(dev, "Reset already in progress\n");
2208 		return -EBUSY;
2209 	}
2210 
2211 	ice_unplug_aux_dev(pf);
2212 
2213 	switch (reset) {
2214 	case ICE_RESET_PFR:
2215 		set_bit(ICE_PFR_REQ, pf->state);
2216 		break;
2217 	case ICE_RESET_CORER:
2218 		set_bit(ICE_CORER_REQ, pf->state);
2219 		break;
2220 	case ICE_RESET_GLOBR:
2221 		set_bit(ICE_GLOBR_REQ, pf->state);
2222 		break;
2223 	default:
2224 		return -EINVAL;
2225 	}
2226 
2227 	ice_service_task_schedule(pf);
2228 	return 0;
2229 }
2230 
2231 /**
2232  * ice_irq_affinity_notify - Callback for affinity changes
2233  * @notify: context as to what irq was changed
2234  * @mask: the new affinity mask
2235  *
2236  * This is a callback function used by the irq_set_affinity_notifier function
2237  * so that we may register to receive changes to the irq affinity masks.
2238  */
2239 static void
2240 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2241 			const cpumask_t *mask)
2242 {
2243 	struct ice_q_vector *q_vector =
2244 		container_of(notify, struct ice_q_vector, affinity_notify);
2245 
2246 	cpumask_copy(&q_vector->affinity_mask, mask);
2247 }
2248 
2249 /**
2250  * ice_irq_affinity_release - Callback for affinity notifier release
2251  * @ref: internal core kernel usage
2252  *
2253  * This is a callback function used by the irq_set_affinity_notifier function
2254  * to inform the current notification subscriber that they will no longer
2255  * receive notifications.
2256  */
2257 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2258 
2259 /**
2260  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2261  * @vsi: the VSI being configured
2262  */
2263 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2264 {
2265 	struct ice_hw *hw = &vsi->back->hw;
2266 	int i;
2267 
2268 	ice_for_each_q_vector(vsi, i)
2269 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2270 
2271 	ice_flush(hw);
2272 	return 0;
2273 }
2274 
2275 /**
2276  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2277  * @vsi: the VSI being configured
2278  * @basename: name for the vector
2279  */
2280 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2281 {
2282 	int q_vectors = vsi->num_q_vectors;
2283 	struct ice_pf *pf = vsi->back;
2284 	int base = vsi->base_vector;
2285 	struct device *dev;
2286 	int rx_int_idx = 0;
2287 	int tx_int_idx = 0;
2288 	int vector, err;
2289 	int irq_num;
2290 
2291 	dev = ice_pf_to_dev(pf);
2292 	for (vector = 0; vector < q_vectors; vector++) {
2293 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2294 
2295 		irq_num = pf->msix_entries[base + vector].vector;
2296 
2297 		if (q_vector->tx.ring && q_vector->rx.ring) {
2298 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2299 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2300 			tx_int_idx++;
2301 		} else if (q_vector->rx.ring) {
2302 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2303 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2304 		} else if (q_vector->tx.ring) {
2305 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2306 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2307 		} else {
2308 			/* skip this unused q_vector */
2309 			continue;
2310 		}
2311 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2312 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2313 					       IRQF_SHARED, q_vector->name,
2314 					       q_vector);
2315 		else
2316 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2317 					       0, q_vector->name, q_vector);
2318 		if (err) {
2319 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2320 				   err);
2321 			goto free_q_irqs;
2322 		}
2323 
2324 		/* register for affinity change notifications */
2325 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2326 			struct irq_affinity_notify *affinity_notify;
2327 
2328 			affinity_notify = &q_vector->affinity_notify;
2329 			affinity_notify->notify = ice_irq_affinity_notify;
2330 			affinity_notify->release = ice_irq_affinity_release;
2331 			irq_set_affinity_notifier(irq_num, affinity_notify);
2332 		}
2333 
2334 		/* assign the mask for this irq */
2335 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2336 	}
2337 
2338 	vsi->irqs_ready = true;
2339 	return 0;
2340 
2341 free_q_irqs:
2342 	while (vector) {
2343 		vector--;
2344 		irq_num = pf->msix_entries[base + vector].vector;
2345 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2346 			irq_set_affinity_notifier(irq_num, NULL);
2347 		irq_set_affinity_hint(irq_num, NULL);
2348 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2349 	}
2350 	return err;
2351 }
2352 
2353 /**
2354  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2355  * @vsi: VSI to setup Tx rings used by XDP
2356  *
2357  * Return 0 on success and negative value on error
2358  */
2359 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2360 {
2361 	struct device *dev = ice_pf_to_dev(vsi->back);
2362 	int i;
2363 
2364 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2365 		u16 xdp_q_idx = vsi->alloc_txq + i;
2366 		struct ice_ring *xdp_ring;
2367 
2368 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2369 
2370 		if (!xdp_ring)
2371 			goto free_xdp_rings;
2372 
2373 		xdp_ring->q_index = xdp_q_idx;
2374 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2375 		xdp_ring->ring_active = false;
2376 		xdp_ring->vsi = vsi;
2377 		xdp_ring->netdev = NULL;
2378 		xdp_ring->dev = dev;
2379 		xdp_ring->count = vsi->num_tx_desc;
2380 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2381 		if (ice_setup_tx_ring(xdp_ring))
2382 			goto free_xdp_rings;
2383 		ice_set_ring_xdp(xdp_ring);
2384 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2385 	}
2386 
2387 	return 0;
2388 
2389 free_xdp_rings:
2390 	for (; i >= 0; i--)
2391 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2392 			ice_free_tx_ring(vsi->xdp_rings[i]);
2393 	return -ENOMEM;
2394 }
2395 
2396 /**
2397  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2398  * @vsi: VSI to set the bpf prog on
2399  * @prog: the bpf prog pointer
2400  */
2401 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2402 {
2403 	struct bpf_prog *old_prog;
2404 	int i;
2405 
2406 	old_prog = xchg(&vsi->xdp_prog, prog);
2407 	if (old_prog)
2408 		bpf_prog_put(old_prog);
2409 
2410 	ice_for_each_rxq(vsi, i)
2411 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2412 }
2413 
2414 /**
2415  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2416  * @vsi: VSI to bring up Tx rings used by XDP
2417  * @prog: bpf program that will be assigned to VSI
2418  *
2419  * Return 0 on success and negative value on error
2420  */
2421 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2422 {
2423 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2424 	int xdp_rings_rem = vsi->num_xdp_txq;
2425 	struct ice_pf *pf = vsi->back;
2426 	struct ice_qs_cfg xdp_qs_cfg = {
2427 		.qs_mutex = &pf->avail_q_mutex,
2428 		.pf_map = pf->avail_txqs,
2429 		.pf_map_size = pf->max_pf_txqs,
2430 		.q_count = vsi->num_xdp_txq,
2431 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2432 		.vsi_map = vsi->txq_map,
2433 		.vsi_map_offset = vsi->alloc_txq,
2434 		.mapping_mode = ICE_VSI_MAP_CONTIG
2435 	};
2436 	enum ice_status status;
2437 	struct device *dev;
2438 	int i, v_idx;
2439 
2440 	dev = ice_pf_to_dev(pf);
2441 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2442 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2443 	if (!vsi->xdp_rings)
2444 		return -ENOMEM;
2445 
2446 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2447 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2448 		goto err_map_xdp;
2449 
2450 	if (ice_xdp_alloc_setup_rings(vsi))
2451 		goto clear_xdp_rings;
2452 
2453 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2454 	ice_for_each_q_vector(vsi, v_idx) {
2455 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2456 		int xdp_rings_per_v, q_id, q_base;
2457 
2458 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2459 					       vsi->num_q_vectors - v_idx);
2460 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2461 
2462 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2463 			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2464 
2465 			xdp_ring->q_vector = q_vector;
2466 			xdp_ring->next = q_vector->tx.ring;
2467 			q_vector->tx.ring = xdp_ring;
2468 		}
2469 		xdp_rings_rem -= xdp_rings_per_v;
2470 	}
2471 
2472 	/* omit the scheduler update if in reset path; XDP queues will be
2473 	 * taken into account at the end of ice_vsi_rebuild, where
2474 	 * ice_cfg_vsi_lan is being called
2475 	 */
2476 	if (ice_is_reset_in_progress(pf->state))
2477 		return 0;
2478 
2479 	/* tell the Tx scheduler that right now we have
2480 	 * additional queues
2481 	 */
2482 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2483 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2484 
2485 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2486 				 max_txqs);
2487 	if (status) {
2488 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2489 			ice_stat_str(status));
2490 		goto clear_xdp_rings;
2491 	}
2492 	ice_vsi_assign_bpf_prog(vsi, prog);
2493 
2494 	return 0;
2495 clear_xdp_rings:
2496 	for (i = 0; i < vsi->num_xdp_txq; i++)
2497 		if (vsi->xdp_rings[i]) {
2498 			kfree_rcu(vsi->xdp_rings[i], rcu);
2499 			vsi->xdp_rings[i] = NULL;
2500 		}
2501 
2502 err_map_xdp:
2503 	mutex_lock(&pf->avail_q_mutex);
2504 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2505 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2506 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2507 	}
2508 	mutex_unlock(&pf->avail_q_mutex);
2509 
2510 	devm_kfree(dev, vsi->xdp_rings);
2511 	return -ENOMEM;
2512 }
2513 
2514 /**
2515  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2516  * @vsi: VSI to remove XDP rings
2517  *
2518  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2519  * resources
2520  */
2521 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2522 {
2523 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2524 	struct ice_pf *pf = vsi->back;
2525 	int i, v_idx;
2526 
2527 	/* q_vectors are freed in reset path so there's no point in detaching
2528 	 * rings; in case of rebuild being triggered not from reset bits
2529 	 * in pf->state won't be set, so additionally check first q_vector
2530 	 * against NULL
2531 	 */
2532 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2533 		goto free_qmap;
2534 
2535 	ice_for_each_q_vector(vsi, v_idx) {
2536 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2537 		struct ice_ring *ring;
2538 
2539 		ice_for_each_ring(ring, q_vector->tx)
2540 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2541 				break;
2542 
2543 		/* restore the value of last node prior to XDP setup */
2544 		q_vector->tx.ring = ring;
2545 	}
2546 
2547 free_qmap:
2548 	mutex_lock(&pf->avail_q_mutex);
2549 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2550 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2551 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2552 	}
2553 	mutex_unlock(&pf->avail_q_mutex);
2554 
2555 	for (i = 0; i < vsi->num_xdp_txq; i++)
2556 		if (vsi->xdp_rings[i]) {
2557 			if (vsi->xdp_rings[i]->desc)
2558 				ice_free_tx_ring(vsi->xdp_rings[i]);
2559 			kfree_rcu(vsi->xdp_rings[i], rcu);
2560 			vsi->xdp_rings[i] = NULL;
2561 		}
2562 
2563 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2564 	vsi->xdp_rings = NULL;
2565 
2566 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2567 		return 0;
2568 
2569 	ice_vsi_assign_bpf_prog(vsi, NULL);
2570 
2571 	/* notify Tx scheduler that we destroyed XDP queues and bring
2572 	 * back the old number of child nodes
2573 	 */
2574 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2575 		max_txqs[i] = vsi->num_txq;
2576 
2577 	/* change number of XDP Tx queues to 0 */
2578 	vsi->num_xdp_txq = 0;
2579 
2580 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2581 			       max_txqs);
2582 }
2583 
2584 /**
2585  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2586  * @vsi: VSI to schedule napi on
2587  */
2588 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2589 {
2590 	int i;
2591 
2592 	ice_for_each_rxq(vsi, i) {
2593 		struct ice_ring *rx_ring = vsi->rx_rings[i];
2594 
2595 		if (rx_ring->xsk_pool)
2596 			napi_schedule(&rx_ring->q_vector->napi);
2597 	}
2598 }
2599 
2600 /**
2601  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2602  * @vsi: VSI to setup XDP for
2603  * @prog: XDP program
2604  * @extack: netlink extended ack
2605  */
2606 static int
2607 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2608 		   struct netlink_ext_ack *extack)
2609 {
2610 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2611 	bool if_running = netif_running(vsi->netdev);
2612 	int ret = 0, xdp_ring_err = 0;
2613 
2614 	if (frame_size > vsi->rx_buf_len) {
2615 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2616 		return -EOPNOTSUPP;
2617 	}
2618 
2619 	/* need to stop netdev while setting up the program for Rx rings */
2620 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2621 		ret = ice_down(vsi);
2622 		if (ret) {
2623 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2624 			return ret;
2625 		}
2626 	}
2627 
2628 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2629 		vsi->num_xdp_txq = vsi->alloc_rxq;
2630 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2631 		if (xdp_ring_err)
2632 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2633 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2634 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2635 		if (xdp_ring_err)
2636 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2637 	} else {
2638 		ice_vsi_assign_bpf_prog(vsi, prog);
2639 	}
2640 
2641 	if (if_running)
2642 		ret = ice_up(vsi);
2643 
2644 	if (!ret && prog)
2645 		ice_vsi_rx_napi_schedule(vsi);
2646 
2647 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2648 }
2649 
2650 /**
2651  * ice_xdp_safe_mode - XDP handler for safe mode
2652  * @dev: netdevice
2653  * @xdp: XDP command
2654  */
2655 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2656 			     struct netdev_bpf *xdp)
2657 {
2658 	NL_SET_ERR_MSG_MOD(xdp->extack,
2659 			   "Please provide working DDP firmware package in order to use XDP\n"
2660 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2661 	return -EOPNOTSUPP;
2662 }
2663 
2664 /**
2665  * ice_xdp - implements XDP handler
2666  * @dev: netdevice
2667  * @xdp: XDP command
2668  */
2669 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2670 {
2671 	struct ice_netdev_priv *np = netdev_priv(dev);
2672 	struct ice_vsi *vsi = np->vsi;
2673 
2674 	if (vsi->type != ICE_VSI_PF) {
2675 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2676 		return -EINVAL;
2677 	}
2678 
2679 	switch (xdp->command) {
2680 	case XDP_SETUP_PROG:
2681 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2682 	case XDP_SETUP_XSK_POOL:
2683 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2684 					  xdp->xsk.queue_id);
2685 	default:
2686 		return -EINVAL;
2687 	}
2688 }
2689 
2690 /**
2691  * ice_ena_misc_vector - enable the non-queue interrupts
2692  * @pf: board private structure
2693  */
2694 static void ice_ena_misc_vector(struct ice_pf *pf)
2695 {
2696 	struct ice_hw *hw = &pf->hw;
2697 	u32 val;
2698 
2699 	/* Disable anti-spoof detection interrupt to prevent spurious event
2700 	 * interrupts during a function reset. Anti-spoof functionally is
2701 	 * still supported.
2702 	 */
2703 	val = rd32(hw, GL_MDCK_TX_TDPU);
2704 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2705 	wr32(hw, GL_MDCK_TX_TDPU, val);
2706 
2707 	/* clear things first */
2708 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2709 	rd32(hw, PFINT_OICR);		/* read to clear */
2710 
2711 	val = (PFINT_OICR_ECC_ERR_M |
2712 	       PFINT_OICR_MAL_DETECT_M |
2713 	       PFINT_OICR_GRST_M |
2714 	       PFINT_OICR_PCI_EXCEPTION_M |
2715 	       PFINT_OICR_VFLR_M |
2716 	       PFINT_OICR_HMC_ERR_M |
2717 	       PFINT_OICR_PE_PUSH_M |
2718 	       PFINT_OICR_PE_CRITERR_M);
2719 
2720 	wr32(hw, PFINT_OICR_ENA, val);
2721 
2722 	/* SW_ITR_IDX = 0, but don't change INTENA */
2723 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2724 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2725 }
2726 
2727 /**
2728  * ice_misc_intr - misc interrupt handler
2729  * @irq: interrupt number
2730  * @data: pointer to a q_vector
2731  */
2732 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2733 {
2734 	struct ice_pf *pf = (struct ice_pf *)data;
2735 	struct ice_hw *hw = &pf->hw;
2736 	irqreturn_t ret = IRQ_NONE;
2737 	struct device *dev;
2738 	u32 oicr, ena_mask;
2739 
2740 	dev = ice_pf_to_dev(pf);
2741 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2742 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2743 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2744 
2745 	oicr = rd32(hw, PFINT_OICR);
2746 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2747 
2748 	if (oicr & PFINT_OICR_SWINT_M) {
2749 		ena_mask &= ~PFINT_OICR_SWINT_M;
2750 		pf->sw_int_count++;
2751 	}
2752 
2753 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2754 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2755 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2756 	}
2757 	if (oicr & PFINT_OICR_VFLR_M) {
2758 		/* disable any further VFLR event notifications */
2759 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2760 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2761 
2762 			reg &= ~PFINT_OICR_VFLR_M;
2763 			wr32(hw, PFINT_OICR_ENA, reg);
2764 		} else {
2765 			ena_mask &= ~PFINT_OICR_VFLR_M;
2766 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2767 		}
2768 	}
2769 
2770 	if (oicr & PFINT_OICR_GRST_M) {
2771 		u32 reset;
2772 
2773 		/* we have a reset warning */
2774 		ena_mask &= ~PFINT_OICR_GRST_M;
2775 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2776 			GLGEN_RSTAT_RESET_TYPE_S;
2777 
2778 		if (reset == ICE_RESET_CORER)
2779 			pf->corer_count++;
2780 		else if (reset == ICE_RESET_GLOBR)
2781 			pf->globr_count++;
2782 		else if (reset == ICE_RESET_EMPR)
2783 			pf->empr_count++;
2784 		else
2785 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2786 
2787 		/* If a reset cycle isn't already in progress, we set a bit in
2788 		 * pf->state so that the service task can start a reset/rebuild.
2789 		 */
2790 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2791 			if (reset == ICE_RESET_CORER)
2792 				set_bit(ICE_CORER_RECV, pf->state);
2793 			else if (reset == ICE_RESET_GLOBR)
2794 				set_bit(ICE_GLOBR_RECV, pf->state);
2795 			else
2796 				set_bit(ICE_EMPR_RECV, pf->state);
2797 
2798 			/* There are couple of different bits at play here.
2799 			 * hw->reset_ongoing indicates whether the hardware is
2800 			 * in reset. This is set to true when a reset interrupt
2801 			 * is received and set back to false after the driver
2802 			 * has determined that the hardware is out of reset.
2803 			 *
2804 			 * ICE_RESET_OICR_RECV in pf->state indicates
2805 			 * that a post reset rebuild is required before the
2806 			 * driver is operational again. This is set above.
2807 			 *
2808 			 * As this is the start of the reset/rebuild cycle, set
2809 			 * both to indicate that.
2810 			 */
2811 			hw->reset_ongoing = true;
2812 		}
2813 	}
2814 
2815 	if (oicr & PFINT_OICR_TSYN_TX_M) {
2816 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
2817 		ice_ptp_process_ts(pf);
2818 	}
2819 
2820 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
2821 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2822 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
2823 
2824 		/* Save EVENTs from GTSYN register */
2825 		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
2826 						     GLTSYN_STAT_EVENT1_M |
2827 						     GLTSYN_STAT_EVENT2_M);
2828 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
2829 		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
2830 	}
2831 
2832 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
2833 	if (oicr & ICE_AUX_CRIT_ERR) {
2834 		struct iidc_event *event;
2835 
2836 		ena_mask &= ~ICE_AUX_CRIT_ERR;
2837 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2838 		if (event) {
2839 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2840 			/* report the entire OICR value to AUX driver */
2841 			event->reg = oicr;
2842 			ice_send_event_to_aux(pf, event);
2843 			kfree(event);
2844 		}
2845 	}
2846 
2847 	/* Report any remaining unexpected interrupts */
2848 	oicr &= ena_mask;
2849 	if (oicr) {
2850 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2851 		/* If a critical error is pending there is no choice but to
2852 		 * reset the device.
2853 		 */
2854 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2855 			    PFINT_OICR_ECC_ERR_M)) {
2856 			set_bit(ICE_PFR_REQ, pf->state);
2857 			ice_service_task_schedule(pf);
2858 		}
2859 	}
2860 	ret = IRQ_HANDLED;
2861 
2862 	ice_service_task_schedule(pf);
2863 	ice_irq_dynamic_ena(hw, NULL, NULL);
2864 
2865 	return ret;
2866 }
2867 
2868 /**
2869  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2870  * @hw: pointer to HW structure
2871  */
2872 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2873 {
2874 	/* disable Admin queue Interrupt causes */
2875 	wr32(hw, PFINT_FW_CTL,
2876 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2877 
2878 	/* disable Mailbox queue Interrupt causes */
2879 	wr32(hw, PFINT_MBX_CTL,
2880 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2881 
2882 	wr32(hw, PFINT_SB_CTL,
2883 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
2884 
2885 	/* disable Control queue Interrupt causes */
2886 	wr32(hw, PFINT_OICR_CTL,
2887 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2888 
2889 	ice_flush(hw);
2890 }
2891 
2892 /**
2893  * ice_free_irq_msix_misc - Unroll misc vector setup
2894  * @pf: board private structure
2895  */
2896 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2897 {
2898 	struct ice_hw *hw = &pf->hw;
2899 
2900 	ice_dis_ctrlq_interrupts(hw);
2901 
2902 	/* disable OICR interrupt */
2903 	wr32(hw, PFINT_OICR_ENA, 0);
2904 	ice_flush(hw);
2905 
2906 	if (pf->msix_entries) {
2907 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2908 		devm_free_irq(ice_pf_to_dev(pf),
2909 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2910 	}
2911 
2912 	pf->num_avail_sw_msix += 1;
2913 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2914 }
2915 
2916 /**
2917  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2918  * @hw: pointer to HW structure
2919  * @reg_idx: HW vector index to associate the control queue interrupts with
2920  */
2921 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2922 {
2923 	u32 val;
2924 
2925 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2926 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2927 	wr32(hw, PFINT_OICR_CTL, val);
2928 
2929 	/* enable Admin queue Interrupt causes */
2930 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2931 	       PFINT_FW_CTL_CAUSE_ENA_M);
2932 	wr32(hw, PFINT_FW_CTL, val);
2933 
2934 	/* enable Mailbox queue Interrupt causes */
2935 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2936 	       PFINT_MBX_CTL_CAUSE_ENA_M);
2937 	wr32(hw, PFINT_MBX_CTL, val);
2938 
2939 	/* This enables Sideband queue Interrupt causes */
2940 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
2941 	       PFINT_SB_CTL_CAUSE_ENA_M);
2942 	wr32(hw, PFINT_SB_CTL, val);
2943 
2944 	ice_flush(hw);
2945 }
2946 
2947 /**
2948  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2949  * @pf: board private structure
2950  *
2951  * This sets up the handler for MSIX 0, which is used to manage the
2952  * non-queue interrupts, e.g. AdminQ and errors. This is not used
2953  * when in MSI or Legacy interrupt mode.
2954  */
2955 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2956 {
2957 	struct device *dev = ice_pf_to_dev(pf);
2958 	struct ice_hw *hw = &pf->hw;
2959 	int oicr_idx, err = 0;
2960 
2961 	if (!pf->int_name[0])
2962 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2963 			 dev_driver_string(dev), dev_name(dev));
2964 
2965 	/* Do not request IRQ but do enable OICR interrupt since settings are
2966 	 * lost during reset. Note that this function is called only during
2967 	 * rebuild path and not while reset is in progress.
2968 	 */
2969 	if (ice_is_reset_in_progress(pf->state))
2970 		goto skip_req_irq;
2971 
2972 	/* reserve one vector in irq_tracker for misc interrupts */
2973 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2974 	if (oicr_idx < 0)
2975 		return oicr_idx;
2976 
2977 	pf->num_avail_sw_msix -= 1;
2978 	pf->oicr_idx = (u16)oicr_idx;
2979 
2980 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2981 			       ice_misc_intr, 0, pf->int_name, pf);
2982 	if (err) {
2983 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2984 			pf->int_name, err);
2985 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2986 		pf->num_avail_sw_msix += 1;
2987 		return err;
2988 	}
2989 
2990 skip_req_irq:
2991 	ice_ena_misc_vector(pf);
2992 
2993 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
2994 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2995 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2996 
2997 	ice_flush(hw);
2998 	ice_irq_dynamic_ena(hw, NULL, NULL);
2999 
3000 	return 0;
3001 }
3002 
3003 /**
3004  * ice_napi_add - register NAPI handler for the VSI
3005  * @vsi: VSI for which NAPI handler is to be registered
3006  *
3007  * This function is only called in the driver's load path. Registering the NAPI
3008  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3009  * reset/rebuild, etc.)
3010  */
3011 static void ice_napi_add(struct ice_vsi *vsi)
3012 {
3013 	int v_idx;
3014 
3015 	if (!vsi->netdev)
3016 		return;
3017 
3018 	ice_for_each_q_vector(vsi, v_idx)
3019 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3020 			       ice_napi_poll, NAPI_POLL_WEIGHT);
3021 }
3022 
3023 /**
3024  * ice_set_ops - set netdev and ethtools ops for the given netdev
3025  * @netdev: netdev instance
3026  */
3027 static void ice_set_ops(struct net_device *netdev)
3028 {
3029 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3030 
3031 	if (ice_is_safe_mode(pf)) {
3032 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3033 		ice_set_ethtool_safe_mode_ops(netdev);
3034 		return;
3035 	}
3036 
3037 	netdev->netdev_ops = &ice_netdev_ops;
3038 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3039 	ice_set_ethtool_ops(netdev);
3040 }
3041 
3042 /**
3043  * ice_set_netdev_features - set features for the given netdev
3044  * @netdev: netdev instance
3045  */
3046 static void ice_set_netdev_features(struct net_device *netdev)
3047 {
3048 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3049 	netdev_features_t csumo_features;
3050 	netdev_features_t vlano_features;
3051 	netdev_features_t dflt_features;
3052 	netdev_features_t tso_features;
3053 
3054 	if (ice_is_safe_mode(pf)) {
3055 		/* safe mode */
3056 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3057 		netdev->hw_features = netdev->features;
3058 		return;
3059 	}
3060 
3061 	dflt_features = NETIF_F_SG	|
3062 			NETIF_F_HIGHDMA	|
3063 			NETIF_F_NTUPLE	|
3064 			NETIF_F_RXHASH;
3065 
3066 	csumo_features = NETIF_F_RXCSUM	  |
3067 			 NETIF_F_IP_CSUM  |
3068 			 NETIF_F_SCTP_CRC |
3069 			 NETIF_F_IPV6_CSUM;
3070 
3071 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3072 			 NETIF_F_HW_VLAN_CTAG_TX     |
3073 			 NETIF_F_HW_VLAN_CTAG_RX;
3074 
3075 	tso_features = NETIF_F_TSO			|
3076 		       NETIF_F_TSO_ECN			|
3077 		       NETIF_F_TSO6			|
3078 		       NETIF_F_GSO_GRE			|
3079 		       NETIF_F_GSO_UDP_TUNNEL		|
3080 		       NETIF_F_GSO_GRE_CSUM		|
3081 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3082 		       NETIF_F_GSO_PARTIAL		|
3083 		       NETIF_F_GSO_IPXIP4		|
3084 		       NETIF_F_GSO_IPXIP6		|
3085 		       NETIF_F_GSO_UDP_L4;
3086 
3087 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3088 					NETIF_F_GSO_GRE_CSUM;
3089 	/* set features that user can change */
3090 	netdev->hw_features = dflt_features | csumo_features |
3091 			      vlano_features | tso_features;
3092 
3093 	/* add support for HW_CSUM on packets with MPLS header */
3094 	netdev->mpls_features =  NETIF_F_HW_CSUM;
3095 
3096 	/* enable features */
3097 	netdev->features |= netdev->hw_features;
3098 	/* encap and VLAN devices inherit default, csumo and tso features */
3099 	netdev->hw_enc_features |= dflt_features | csumo_features |
3100 				   tso_features;
3101 	netdev->vlan_features |= dflt_features | csumo_features |
3102 				 tso_features;
3103 }
3104 
3105 /**
3106  * ice_cfg_netdev - Allocate, configure and register a netdev
3107  * @vsi: the VSI associated with the new netdev
3108  *
3109  * Returns 0 on success, negative value on failure
3110  */
3111 static int ice_cfg_netdev(struct ice_vsi *vsi)
3112 {
3113 	struct ice_netdev_priv *np;
3114 	struct net_device *netdev;
3115 	u8 mac_addr[ETH_ALEN];
3116 
3117 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3118 				    vsi->alloc_rxq);
3119 	if (!netdev)
3120 		return -ENOMEM;
3121 
3122 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3123 	vsi->netdev = netdev;
3124 	np = netdev_priv(netdev);
3125 	np->vsi = vsi;
3126 
3127 	ice_set_netdev_features(netdev);
3128 
3129 	ice_set_ops(netdev);
3130 
3131 	if (vsi->type == ICE_VSI_PF) {
3132 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3133 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3134 		ether_addr_copy(netdev->dev_addr, mac_addr);
3135 		ether_addr_copy(netdev->perm_addr, mac_addr);
3136 	}
3137 
3138 	netdev->priv_flags |= IFF_UNICAST_FLT;
3139 
3140 	/* Setup netdev TC information */
3141 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3142 
3143 	/* setup watchdog timeout value to be 5 second */
3144 	netdev->watchdog_timeo = 5 * HZ;
3145 
3146 	netdev->min_mtu = ETH_MIN_MTU;
3147 	netdev->max_mtu = ICE_MAX_MTU;
3148 
3149 	return 0;
3150 }
3151 
3152 /**
3153  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3154  * @lut: Lookup table
3155  * @rss_table_size: Lookup table size
3156  * @rss_size: Range of queue number for hashing
3157  */
3158 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3159 {
3160 	u16 i;
3161 
3162 	for (i = 0; i < rss_table_size; i++)
3163 		lut[i] = i % rss_size;
3164 }
3165 
3166 /**
3167  * ice_pf_vsi_setup - Set up a PF VSI
3168  * @pf: board private structure
3169  * @pi: pointer to the port_info instance
3170  *
3171  * Returns pointer to the successfully allocated VSI software struct
3172  * on success, otherwise returns NULL on failure.
3173  */
3174 static struct ice_vsi *
3175 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3176 {
3177 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3178 }
3179 
3180 /**
3181  * ice_ctrl_vsi_setup - Set up a control VSI
3182  * @pf: board private structure
3183  * @pi: pointer to the port_info instance
3184  *
3185  * Returns pointer to the successfully allocated VSI software struct
3186  * on success, otherwise returns NULL on failure.
3187  */
3188 static struct ice_vsi *
3189 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3190 {
3191 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3192 }
3193 
3194 /**
3195  * ice_lb_vsi_setup - Set up a loopback VSI
3196  * @pf: board private structure
3197  * @pi: pointer to the port_info instance
3198  *
3199  * Returns pointer to the successfully allocated VSI software struct
3200  * on success, otherwise returns NULL on failure.
3201  */
3202 struct ice_vsi *
3203 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3204 {
3205 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3206 }
3207 
3208 /**
3209  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3210  * @netdev: network interface to be adjusted
3211  * @proto: unused protocol
3212  * @vid: VLAN ID to be added
3213  *
3214  * net_device_ops implementation for adding VLAN IDs
3215  */
3216 static int
3217 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3218 		    u16 vid)
3219 {
3220 	struct ice_netdev_priv *np = netdev_priv(netdev);
3221 	struct ice_vsi *vsi = np->vsi;
3222 	int ret;
3223 
3224 	/* VLAN 0 is added by default during load/reset */
3225 	if (!vid)
3226 		return 0;
3227 
3228 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3229 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3230 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3231 		if (ret)
3232 			return ret;
3233 	}
3234 
3235 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3236 	 * packets aren't pruned by the device's internal switch on Rx
3237 	 */
3238 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3239 	if (!ret)
3240 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3241 
3242 	return ret;
3243 }
3244 
3245 /**
3246  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3247  * @netdev: network interface to be adjusted
3248  * @proto: unused protocol
3249  * @vid: VLAN ID to be removed
3250  *
3251  * net_device_ops implementation for removing VLAN IDs
3252  */
3253 static int
3254 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3255 		     u16 vid)
3256 {
3257 	struct ice_netdev_priv *np = netdev_priv(netdev);
3258 	struct ice_vsi *vsi = np->vsi;
3259 	int ret;
3260 
3261 	/* don't allow removal of VLAN 0 */
3262 	if (!vid)
3263 		return 0;
3264 
3265 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3266 	 * information
3267 	 */
3268 	ret = ice_vsi_kill_vlan(vsi, vid);
3269 	if (ret)
3270 		return ret;
3271 
3272 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3273 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3274 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3275 
3276 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3277 	return ret;
3278 }
3279 
3280 /**
3281  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3282  * @pf: board private structure
3283  *
3284  * Returns 0 on success, negative value on failure
3285  */
3286 static int ice_setup_pf_sw(struct ice_pf *pf)
3287 {
3288 	struct ice_vsi *vsi;
3289 	int status = 0;
3290 
3291 	if (ice_is_reset_in_progress(pf->state))
3292 		return -EBUSY;
3293 
3294 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3295 	if (!vsi)
3296 		return -ENOMEM;
3297 
3298 	status = ice_cfg_netdev(vsi);
3299 	if (status) {
3300 		status = -ENODEV;
3301 		goto unroll_vsi_setup;
3302 	}
3303 	/* netdev has to be configured before setting frame size */
3304 	ice_vsi_cfg_frame_size(vsi);
3305 
3306 	/* Setup DCB netlink interface */
3307 	ice_dcbnl_setup(vsi);
3308 
3309 	/* registering the NAPI handler requires both the queues and
3310 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3311 	 * and ice_cfg_netdev() respectively
3312 	 */
3313 	ice_napi_add(vsi);
3314 
3315 	status = ice_set_cpu_rx_rmap(vsi);
3316 	if (status) {
3317 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3318 			vsi->vsi_num, status);
3319 		status = -EINVAL;
3320 		goto unroll_napi_add;
3321 	}
3322 	status = ice_init_mac_fltr(pf);
3323 	if (status)
3324 		goto free_cpu_rx_map;
3325 
3326 	return status;
3327 
3328 free_cpu_rx_map:
3329 	ice_free_cpu_rx_rmap(vsi);
3330 
3331 unroll_napi_add:
3332 	if (vsi) {
3333 		ice_napi_del(vsi);
3334 		if (vsi->netdev) {
3335 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3336 			free_netdev(vsi->netdev);
3337 			vsi->netdev = NULL;
3338 		}
3339 	}
3340 
3341 unroll_vsi_setup:
3342 	ice_vsi_release(vsi);
3343 	return status;
3344 }
3345 
3346 /**
3347  * ice_get_avail_q_count - Get count of queues in use
3348  * @pf_qmap: bitmap to get queue use count from
3349  * @lock: pointer to a mutex that protects access to pf_qmap
3350  * @size: size of the bitmap
3351  */
3352 static u16
3353 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3354 {
3355 	unsigned long bit;
3356 	u16 count = 0;
3357 
3358 	mutex_lock(lock);
3359 	for_each_clear_bit(bit, pf_qmap, size)
3360 		count++;
3361 	mutex_unlock(lock);
3362 
3363 	return count;
3364 }
3365 
3366 /**
3367  * ice_get_avail_txq_count - Get count of Tx queues in use
3368  * @pf: pointer to an ice_pf instance
3369  */
3370 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3371 {
3372 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3373 				     pf->max_pf_txqs);
3374 }
3375 
3376 /**
3377  * ice_get_avail_rxq_count - Get count of Rx queues in use
3378  * @pf: pointer to an ice_pf instance
3379  */
3380 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3381 {
3382 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3383 				     pf->max_pf_rxqs);
3384 }
3385 
3386 /**
3387  * ice_deinit_pf - Unrolls initialziations done by ice_init_pf
3388  * @pf: board private structure to initialize
3389  */
3390 static void ice_deinit_pf(struct ice_pf *pf)
3391 {
3392 	ice_service_task_stop(pf);
3393 	mutex_destroy(&pf->sw_mutex);
3394 	mutex_destroy(&pf->tc_mutex);
3395 	mutex_destroy(&pf->avail_q_mutex);
3396 
3397 	if (pf->avail_txqs) {
3398 		bitmap_free(pf->avail_txqs);
3399 		pf->avail_txqs = NULL;
3400 	}
3401 
3402 	if (pf->avail_rxqs) {
3403 		bitmap_free(pf->avail_rxqs);
3404 		pf->avail_rxqs = NULL;
3405 	}
3406 
3407 	if (pf->ptp.clock)
3408 		ptp_clock_unregister(pf->ptp.clock);
3409 }
3410 
3411 /**
3412  * ice_set_pf_caps - set PFs capability flags
3413  * @pf: pointer to the PF instance
3414  */
3415 static void ice_set_pf_caps(struct ice_pf *pf)
3416 {
3417 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3418 
3419 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3420 	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3421 	if (func_caps->common_cap.rdma) {
3422 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3423 		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3424 	}
3425 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3426 	if (func_caps->common_cap.dcb)
3427 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3428 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3429 	if (func_caps->common_cap.sr_iov_1_1) {
3430 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3431 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3432 					      ICE_MAX_VF_COUNT);
3433 	}
3434 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3435 	if (func_caps->common_cap.rss_table_size)
3436 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3437 
3438 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3439 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3440 		u16 unused;
3441 
3442 		/* ctrl_vsi_idx will be set to a valid value when flow director
3443 		 * is setup by ice_init_fdir
3444 		 */
3445 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3446 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3447 		/* force guaranteed filter pool for PF */
3448 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3449 				       func_caps->fd_fltr_guar);
3450 		/* force shared filter pool for PF */
3451 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3452 				       func_caps->fd_fltr_best_effort);
3453 	}
3454 
3455 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3456 	if (func_caps->common_cap.ieee_1588)
3457 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3458 
3459 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3460 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3461 }
3462 
3463 /**
3464  * ice_init_pf - Initialize general software structures (struct ice_pf)
3465  * @pf: board private structure to initialize
3466  */
3467 static int ice_init_pf(struct ice_pf *pf)
3468 {
3469 	ice_set_pf_caps(pf);
3470 
3471 	mutex_init(&pf->sw_mutex);
3472 	mutex_init(&pf->tc_mutex);
3473 
3474 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3475 	spin_lock_init(&pf->aq_wait_lock);
3476 	init_waitqueue_head(&pf->aq_wait_queue);
3477 
3478 	init_waitqueue_head(&pf->reset_wait_queue);
3479 
3480 	/* setup service timer and periodic service task */
3481 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3482 	pf->serv_tmr_period = HZ;
3483 	INIT_WORK(&pf->serv_task, ice_service_task);
3484 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3485 
3486 	mutex_init(&pf->avail_q_mutex);
3487 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3488 	if (!pf->avail_txqs)
3489 		return -ENOMEM;
3490 
3491 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3492 	if (!pf->avail_rxqs) {
3493 		devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
3494 		pf->avail_txqs = NULL;
3495 		return -ENOMEM;
3496 	}
3497 
3498 	return 0;
3499 }
3500 
3501 /**
3502  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3503  * @pf: board private structure
3504  *
3505  * compute the number of MSIX vectors required (v_budget) and request from
3506  * the OS. Return the number of vectors reserved or negative on failure
3507  */
3508 static int ice_ena_msix_range(struct ice_pf *pf)
3509 {
3510 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3511 	struct device *dev = ice_pf_to_dev(pf);
3512 	int needed, err, i;
3513 
3514 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3515 	num_cpus = num_online_cpus();
3516 
3517 	/* reserve for LAN miscellaneous handler */
3518 	needed = ICE_MIN_LAN_OICR_MSIX;
3519 	if (v_left < needed)
3520 		goto no_hw_vecs_left_err;
3521 	v_budget += needed;
3522 	v_left -= needed;
3523 
3524 	/* reserve for flow director */
3525 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3526 		needed = ICE_FDIR_MSIX;
3527 		if (v_left < needed)
3528 			goto no_hw_vecs_left_err;
3529 		v_budget += needed;
3530 		v_left -= needed;
3531 	}
3532 
3533 	/* total used for non-traffic vectors */
3534 	v_other = v_budget;
3535 
3536 	/* reserve vectors for LAN traffic */
3537 	needed = num_cpus;
3538 	if (v_left < needed)
3539 		goto no_hw_vecs_left_err;
3540 	pf->num_lan_msix = needed;
3541 	v_budget += needed;
3542 	v_left -= needed;
3543 
3544 	/* reserve vectors for RDMA auxiliary driver */
3545 	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3546 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3547 		if (v_left < needed)
3548 			goto no_hw_vecs_left_err;
3549 		pf->num_rdma_msix = needed;
3550 		v_budget += needed;
3551 		v_left -= needed;
3552 	}
3553 
3554 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3555 					sizeof(*pf->msix_entries), GFP_KERNEL);
3556 	if (!pf->msix_entries) {
3557 		err = -ENOMEM;
3558 		goto exit_err;
3559 	}
3560 
3561 	for (i = 0; i < v_budget; i++)
3562 		pf->msix_entries[i].entry = i;
3563 
3564 	/* actually reserve the vectors */
3565 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3566 					 ICE_MIN_MSIX, v_budget);
3567 	if (v_actual < 0) {
3568 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3569 		err = v_actual;
3570 		goto msix_err;
3571 	}
3572 
3573 	if (v_actual < v_budget) {
3574 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3575 			 v_budget, v_actual);
3576 
3577 		if (v_actual < ICE_MIN_MSIX) {
3578 			/* error if we can't get minimum vectors */
3579 			pci_disable_msix(pf->pdev);
3580 			err = -ERANGE;
3581 			goto msix_err;
3582 		} else {
3583 			int v_remain = v_actual - v_other;
3584 			int v_rdma = 0, v_min_rdma = 0;
3585 
3586 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3587 				/* Need at least 1 interrupt in addition to
3588 				 * AEQ MSIX
3589 				 */
3590 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3591 				v_min_rdma = ICE_MIN_RDMA_MSIX;
3592 			}
3593 
3594 			if (v_actual == ICE_MIN_MSIX ||
3595 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3596 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3597 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3598 
3599 				pf->num_rdma_msix = 0;
3600 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3601 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3602 				   (v_remain - v_rdma < v_rdma)) {
3603 				/* Support minimum RDMA and give remaining
3604 				 * vectors to LAN MSIX
3605 				 */
3606 				pf->num_rdma_msix = v_min_rdma;
3607 				pf->num_lan_msix = v_remain - v_min_rdma;
3608 			} else {
3609 				/* Split remaining MSIX with RDMA after
3610 				 * accounting for AEQ MSIX
3611 				 */
3612 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3613 						    ICE_RDMA_NUM_AEQ_MSIX;
3614 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3615 			}
3616 
3617 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3618 				   pf->num_lan_msix);
3619 
3620 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3621 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3622 					   pf->num_rdma_msix);
3623 		}
3624 	}
3625 
3626 	return v_actual;
3627 
3628 msix_err:
3629 	devm_kfree(dev, pf->msix_entries);
3630 	goto exit_err;
3631 
3632 no_hw_vecs_left_err:
3633 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3634 		needed, v_left);
3635 	err = -ERANGE;
3636 exit_err:
3637 	pf->num_rdma_msix = 0;
3638 	pf->num_lan_msix = 0;
3639 	return err;
3640 }
3641 
3642 /**
3643  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3644  * @pf: board private structure
3645  */
3646 static void ice_dis_msix(struct ice_pf *pf)
3647 {
3648 	pci_disable_msix(pf->pdev);
3649 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3650 	pf->msix_entries = NULL;
3651 }
3652 
3653 /**
3654  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3655  * @pf: board private structure
3656  */
3657 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3658 {
3659 	ice_dis_msix(pf);
3660 
3661 	if (pf->irq_tracker) {
3662 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3663 		pf->irq_tracker = NULL;
3664 	}
3665 }
3666 
3667 /**
3668  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3669  * @pf: board private structure to initialize
3670  */
3671 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3672 {
3673 	int vectors;
3674 
3675 	vectors = ice_ena_msix_range(pf);
3676 
3677 	if (vectors < 0)
3678 		return vectors;
3679 
3680 	/* set up vector assignment tracking */
3681 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3682 				       struct_size(pf->irq_tracker, list, vectors),
3683 				       GFP_KERNEL);
3684 	if (!pf->irq_tracker) {
3685 		ice_dis_msix(pf);
3686 		return -ENOMEM;
3687 	}
3688 
3689 	/* populate SW interrupts pool with number of OS granted IRQs. */
3690 	pf->num_avail_sw_msix = (u16)vectors;
3691 	pf->irq_tracker->num_entries = (u16)vectors;
3692 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3693 
3694 	return 0;
3695 }
3696 
3697 /**
3698  * ice_is_wol_supported - check if WoL is supported
3699  * @hw: pointer to hardware info
3700  *
3701  * Check if WoL is supported based on the HW configuration.
3702  * Returns true if NVM supports and enables WoL for this port, false otherwise
3703  */
3704 bool ice_is_wol_supported(struct ice_hw *hw)
3705 {
3706 	u16 wol_ctrl;
3707 
3708 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3709 	 * word) indicates WoL is not supported on the corresponding PF ID.
3710 	 */
3711 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3712 		return false;
3713 
3714 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3715 }
3716 
3717 /**
3718  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3719  * @vsi: VSI being changed
3720  * @new_rx: new number of Rx queues
3721  * @new_tx: new number of Tx queues
3722  *
3723  * Only change the number of queues if new_tx, or new_rx is non-0.
3724  *
3725  * Returns 0 on success.
3726  */
3727 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3728 {
3729 	struct ice_pf *pf = vsi->back;
3730 	int err = 0, timeout = 50;
3731 
3732 	if (!new_rx && !new_tx)
3733 		return -EINVAL;
3734 
3735 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3736 		timeout--;
3737 		if (!timeout)
3738 			return -EBUSY;
3739 		usleep_range(1000, 2000);
3740 	}
3741 
3742 	if (new_tx)
3743 		vsi->req_txq = (u16)new_tx;
3744 	if (new_rx)
3745 		vsi->req_rxq = (u16)new_rx;
3746 
3747 	/* set for the next time the netdev is started */
3748 	if (!netif_running(vsi->netdev)) {
3749 		ice_vsi_rebuild(vsi, false);
3750 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3751 		goto done;
3752 	}
3753 
3754 	ice_vsi_close(vsi);
3755 	ice_vsi_rebuild(vsi, false);
3756 	ice_pf_dcb_recfg(pf);
3757 	ice_vsi_open(vsi);
3758 done:
3759 	clear_bit(ICE_CFG_BUSY, pf->state);
3760 	return err;
3761 }
3762 
3763 /**
3764  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3765  * @pf: PF to configure
3766  *
3767  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3768  * VSI can still Tx/Rx VLAN tagged packets.
3769  */
3770 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3771 {
3772 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3773 	struct ice_vsi_ctx *ctxt;
3774 	enum ice_status status;
3775 	struct ice_hw *hw;
3776 
3777 	if (!vsi)
3778 		return;
3779 
3780 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3781 	if (!ctxt)
3782 		return;
3783 
3784 	hw = &pf->hw;
3785 	ctxt->info = vsi->info;
3786 
3787 	ctxt->info.valid_sections =
3788 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3789 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3790 			    ICE_AQ_VSI_PROP_SW_VALID);
3791 
3792 	/* disable VLAN anti-spoof */
3793 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3794 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3795 
3796 	/* disable VLAN pruning and keep all other settings */
3797 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3798 
3799 	/* allow all VLANs on Tx and don't strip on Rx */
3800 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3801 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3802 
3803 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3804 	if (status) {
3805 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3806 			ice_stat_str(status),
3807 			ice_aq_str(hw->adminq.sq_last_status));
3808 	} else {
3809 		vsi->info.sec_flags = ctxt->info.sec_flags;
3810 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3811 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3812 	}
3813 
3814 	kfree(ctxt);
3815 }
3816 
3817 /**
3818  * ice_log_pkg_init - log result of DDP package load
3819  * @hw: pointer to hardware info
3820  * @status: status of package load
3821  */
3822 static void
3823 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3824 {
3825 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3826 	struct device *dev = ice_pf_to_dev(pf);
3827 
3828 	switch (*status) {
3829 	case ICE_SUCCESS:
3830 		/* The package download AdminQ command returned success because
3831 		 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
3832 		 * already a package loaded on the device.
3833 		 */
3834 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3835 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3836 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3837 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3838 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3839 			    sizeof(hw->pkg_name))) {
3840 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3841 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3842 					 hw->active_pkg_name,
3843 					 hw->active_pkg_ver.major,
3844 					 hw->active_pkg_ver.minor,
3845 					 hw->active_pkg_ver.update,
3846 					 hw->active_pkg_ver.draft);
3847 			else
3848 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3849 					 hw->active_pkg_name,
3850 					 hw->active_pkg_ver.major,
3851 					 hw->active_pkg_ver.minor,
3852 					 hw->active_pkg_ver.update,
3853 					 hw->active_pkg_ver.draft);
3854 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3855 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3856 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3857 				hw->active_pkg_name,
3858 				hw->active_pkg_ver.major,
3859 				hw->active_pkg_ver.minor,
3860 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3861 			*status = ICE_ERR_NOT_SUPPORTED;
3862 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3863 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3864 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3865 				 hw->active_pkg_name,
3866 				 hw->active_pkg_ver.major,
3867 				 hw->active_pkg_ver.minor,
3868 				 hw->active_pkg_ver.update,
3869 				 hw->active_pkg_ver.draft,
3870 				 hw->pkg_name,
3871 				 hw->pkg_ver.major,
3872 				 hw->pkg_ver.minor,
3873 				 hw->pkg_ver.update,
3874 				 hw->pkg_ver.draft);
3875 		} else {
3876 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3877 			*status = ICE_ERR_NOT_SUPPORTED;
3878 		}
3879 		break;
3880 	case ICE_ERR_FW_DDP_MISMATCH:
3881 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3882 		break;
3883 	case ICE_ERR_BUF_TOO_SHORT:
3884 	case ICE_ERR_CFG:
3885 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3886 		break;
3887 	case ICE_ERR_NOT_SUPPORTED:
3888 		/* Package File version not supported */
3889 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3890 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3891 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3892 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3893 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3894 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3895 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3896 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3897 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3898 		break;
3899 	case ICE_ERR_AQ_ERROR:
3900 		switch (hw->pkg_dwnld_status) {
3901 		case ICE_AQ_RC_ENOSEC:
3902 		case ICE_AQ_RC_EBADSIG:
3903 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3904 			return;
3905 		case ICE_AQ_RC_ESVN:
3906 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3907 			return;
3908 		case ICE_AQ_RC_EBADMAN:
3909 		case ICE_AQ_RC_EBADBUF:
3910 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3911 			/* poll for reset to complete */
3912 			if (ice_check_reset(hw))
3913 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3914 			return;
3915 		default:
3916 			break;
3917 		}
3918 		fallthrough;
3919 	default:
3920 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3921 			*status);
3922 		break;
3923 	}
3924 }
3925 
3926 /**
3927  * ice_load_pkg - load/reload the DDP Package file
3928  * @firmware: firmware structure when firmware requested or NULL for reload
3929  * @pf: pointer to the PF instance
3930  *
3931  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3932  * initialize HW tables.
3933  */
3934 static void
3935 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3936 {
3937 	enum ice_status status = ICE_ERR_PARAM;
3938 	struct device *dev = ice_pf_to_dev(pf);
3939 	struct ice_hw *hw = &pf->hw;
3940 
3941 	/* Load DDP Package */
3942 	if (firmware && !hw->pkg_copy) {
3943 		status = ice_copy_and_init_pkg(hw, firmware->data,
3944 					       firmware->size);
3945 		ice_log_pkg_init(hw, &status);
3946 	} else if (!firmware && hw->pkg_copy) {
3947 		/* Reload package during rebuild after CORER/GLOBR reset */
3948 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3949 		ice_log_pkg_init(hw, &status);
3950 	} else {
3951 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3952 	}
3953 
3954 	if (status) {
3955 		/* Safe Mode */
3956 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3957 		return;
3958 	}
3959 
3960 	/* Successful download package is the precondition for advanced
3961 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
3962 	 */
3963 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3964 }
3965 
3966 /**
3967  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3968  * @pf: pointer to the PF structure
3969  *
3970  * There is no error returned here because the driver should be able to handle
3971  * 128 Byte cache lines, so we only print a warning in case issues are seen,
3972  * specifically with Tx.
3973  */
3974 static void ice_verify_cacheline_size(struct ice_pf *pf)
3975 {
3976 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3977 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3978 			 ICE_CACHE_LINE_BYTES);
3979 }
3980 
3981 /**
3982  * ice_send_version - update firmware with driver version
3983  * @pf: PF struct
3984  *
3985  * Returns ICE_SUCCESS on success, else error code
3986  */
3987 static enum ice_status ice_send_version(struct ice_pf *pf)
3988 {
3989 	struct ice_driver_ver dv;
3990 
3991 	dv.major_ver = 0xff;
3992 	dv.minor_ver = 0xff;
3993 	dv.build_ver = 0xff;
3994 	dv.subbuild_ver = 0;
3995 	strscpy((char *)dv.driver_string, UTS_RELEASE,
3996 		sizeof(dv.driver_string));
3997 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3998 }
3999 
4000 /**
4001  * ice_init_fdir - Initialize flow director VSI and configuration
4002  * @pf: pointer to the PF instance
4003  *
4004  * returns 0 on success, negative on error
4005  */
4006 static int ice_init_fdir(struct ice_pf *pf)
4007 {
4008 	struct device *dev = ice_pf_to_dev(pf);
4009 	struct ice_vsi *ctrl_vsi;
4010 	int err;
4011 
4012 	/* Side Band Flow Director needs to have a control VSI.
4013 	 * Allocate it and store it in the PF.
4014 	 */
4015 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4016 	if (!ctrl_vsi) {
4017 		dev_dbg(dev, "could not create control VSI\n");
4018 		return -ENOMEM;
4019 	}
4020 
4021 	err = ice_vsi_open_ctrl(ctrl_vsi);
4022 	if (err) {
4023 		dev_dbg(dev, "could not open control VSI\n");
4024 		goto err_vsi_open;
4025 	}
4026 
4027 	mutex_init(&pf->hw.fdir_fltr_lock);
4028 
4029 	err = ice_fdir_create_dflt_rules(pf);
4030 	if (err)
4031 		goto err_fdir_rule;
4032 
4033 	return 0;
4034 
4035 err_fdir_rule:
4036 	ice_fdir_release_flows(&pf->hw);
4037 	ice_vsi_close(ctrl_vsi);
4038 err_vsi_open:
4039 	ice_vsi_release(ctrl_vsi);
4040 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4041 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4042 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4043 	}
4044 	return err;
4045 }
4046 
4047 /**
4048  * ice_get_opt_fw_name - return optional firmware file name or NULL
4049  * @pf: pointer to the PF instance
4050  */
4051 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4052 {
4053 	/* Optional firmware name same as default with additional dash
4054 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
4055 	 */
4056 	struct pci_dev *pdev = pf->pdev;
4057 	char *opt_fw_filename;
4058 	u64 dsn;
4059 
4060 	/* Determine the name of the optional file using the DSN (two
4061 	 * dwords following the start of the DSN Capability).
4062 	 */
4063 	dsn = pci_get_dsn(pdev);
4064 	if (!dsn)
4065 		return NULL;
4066 
4067 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4068 	if (!opt_fw_filename)
4069 		return NULL;
4070 
4071 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4072 		 ICE_DDP_PKG_PATH, dsn);
4073 
4074 	return opt_fw_filename;
4075 }
4076 
4077 /**
4078  * ice_request_fw - Device initialization routine
4079  * @pf: pointer to the PF instance
4080  */
4081 static void ice_request_fw(struct ice_pf *pf)
4082 {
4083 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4084 	const struct firmware *firmware = NULL;
4085 	struct device *dev = ice_pf_to_dev(pf);
4086 	int err = 0;
4087 
4088 	/* optional device-specific DDP (if present) overrides the default DDP
4089 	 * package file. kernel logs a debug message if the file doesn't exist,
4090 	 * and warning messages for other errors.
4091 	 */
4092 	if (opt_fw_filename) {
4093 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4094 		if (err) {
4095 			kfree(opt_fw_filename);
4096 			goto dflt_pkg_load;
4097 		}
4098 
4099 		/* request for firmware was successful. Download to device */
4100 		ice_load_pkg(firmware, pf);
4101 		kfree(opt_fw_filename);
4102 		release_firmware(firmware);
4103 		return;
4104 	}
4105 
4106 dflt_pkg_load:
4107 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4108 	if (err) {
4109 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4110 		return;
4111 	}
4112 
4113 	/* request for firmware was successful. Download to device */
4114 	ice_load_pkg(firmware, pf);
4115 	release_firmware(firmware);
4116 }
4117 
4118 /**
4119  * ice_print_wake_reason - show the wake up cause in the log
4120  * @pf: pointer to the PF struct
4121  */
4122 static void ice_print_wake_reason(struct ice_pf *pf)
4123 {
4124 	u32 wus = pf->wakeup_reason;
4125 	const char *wake_str;
4126 
4127 	/* if no wake event, nothing to print */
4128 	if (!wus)
4129 		return;
4130 
4131 	if (wus & PFPM_WUS_LNKC_M)
4132 		wake_str = "Link\n";
4133 	else if (wus & PFPM_WUS_MAG_M)
4134 		wake_str = "Magic Packet\n";
4135 	else if (wus & PFPM_WUS_MNG_M)
4136 		wake_str = "Management\n";
4137 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4138 		wake_str = "Firmware Reset\n";
4139 	else
4140 		wake_str = "Unknown\n";
4141 
4142 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4143 }
4144 
4145 /**
4146  * ice_register_netdev - register netdev and devlink port
4147  * @pf: pointer to the PF struct
4148  */
4149 static int ice_register_netdev(struct ice_pf *pf)
4150 {
4151 	struct ice_vsi *vsi;
4152 	int err = 0;
4153 
4154 	vsi = ice_get_main_vsi(pf);
4155 	if (!vsi || !vsi->netdev)
4156 		return -EIO;
4157 
4158 	err = register_netdev(vsi->netdev);
4159 	if (err)
4160 		goto err_register_netdev;
4161 
4162 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4163 	netif_carrier_off(vsi->netdev);
4164 	netif_tx_stop_all_queues(vsi->netdev);
4165 	err = ice_devlink_create_port(vsi);
4166 	if (err)
4167 		goto err_devlink_create;
4168 
4169 	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
4170 
4171 	return 0;
4172 err_devlink_create:
4173 	unregister_netdev(vsi->netdev);
4174 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4175 err_register_netdev:
4176 	free_netdev(vsi->netdev);
4177 	vsi->netdev = NULL;
4178 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4179 	return err;
4180 }
4181 
4182 /**
4183  * ice_probe - Device initialization routine
4184  * @pdev: PCI device information struct
4185  * @ent: entry in ice_pci_tbl
4186  *
4187  * Returns 0 on success, negative on failure
4188  */
4189 static int
4190 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4191 {
4192 	struct device *dev = &pdev->dev;
4193 	struct ice_pf *pf;
4194 	struct ice_hw *hw;
4195 	int i, err;
4196 
4197 	/* this driver uses devres, see
4198 	 * Documentation/driver-api/driver-model/devres.rst
4199 	 */
4200 	err = pcim_enable_device(pdev);
4201 	if (err)
4202 		return err;
4203 
4204 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4205 	if (err) {
4206 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4207 		return err;
4208 	}
4209 
4210 	pf = ice_allocate_pf(dev);
4211 	if (!pf)
4212 		return -ENOMEM;
4213 
4214 	/* set up for high or low DMA */
4215 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4216 	if (err)
4217 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4218 	if (err) {
4219 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4220 		return err;
4221 	}
4222 
4223 	pci_enable_pcie_error_reporting(pdev);
4224 	pci_set_master(pdev);
4225 
4226 	pf->pdev = pdev;
4227 	pci_set_drvdata(pdev, pf);
4228 	set_bit(ICE_DOWN, pf->state);
4229 	/* Disable service task until DOWN bit is cleared */
4230 	set_bit(ICE_SERVICE_DIS, pf->state);
4231 
4232 	hw = &pf->hw;
4233 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4234 	pci_save_state(pdev);
4235 
4236 	hw->back = pf;
4237 	hw->vendor_id = pdev->vendor;
4238 	hw->device_id = pdev->device;
4239 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4240 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4241 	hw->subsystem_device_id = pdev->subsystem_device;
4242 	hw->bus.device = PCI_SLOT(pdev->devfn);
4243 	hw->bus.func = PCI_FUNC(pdev->devfn);
4244 	ice_set_ctrlq_len(hw);
4245 
4246 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4247 
4248 	err = ice_devlink_register(pf);
4249 	if (err) {
4250 		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4251 		goto err_exit_unroll;
4252 	}
4253 
4254 #ifndef CONFIG_DYNAMIC_DEBUG
4255 	if (debug < -1)
4256 		hw->debug_mask = debug;
4257 #endif
4258 
4259 	err = ice_init_hw(hw);
4260 	if (err) {
4261 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4262 		err = -EIO;
4263 		goto err_exit_unroll;
4264 	}
4265 
4266 	ice_request_fw(pf);
4267 
4268 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4269 	 * set in pf->state, which will cause ice_is_safe_mode to return
4270 	 * true
4271 	 */
4272 	if (ice_is_safe_mode(pf)) {
4273 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4274 		/* we already got function/device capabilities but these don't
4275 		 * reflect what the driver needs to do in safe mode. Instead of
4276 		 * adding conditional logic everywhere to ignore these
4277 		 * device/function capabilities, override them.
4278 		 */
4279 		ice_set_safe_mode_caps(hw);
4280 	}
4281 
4282 	err = ice_init_pf(pf);
4283 	if (err) {
4284 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4285 		goto err_init_pf_unroll;
4286 	}
4287 
4288 	ice_devlink_init_regions(pf);
4289 
4290 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4291 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4292 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4293 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4294 	i = 0;
4295 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4296 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4297 			pf->hw.tnl.valid_count[TNL_VXLAN];
4298 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4299 			UDP_TUNNEL_TYPE_VXLAN;
4300 		i++;
4301 	}
4302 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4303 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4304 			pf->hw.tnl.valid_count[TNL_GENEVE];
4305 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4306 			UDP_TUNNEL_TYPE_GENEVE;
4307 		i++;
4308 	}
4309 
4310 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4311 	if (!pf->num_alloc_vsi) {
4312 		err = -EIO;
4313 		goto err_init_pf_unroll;
4314 	}
4315 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4316 		dev_warn(&pf->pdev->dev,
4317 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4318 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4319 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4320 	}
4321 
4322 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4323 			       GFP_KERNEL);
4324 	if (!pf->vsi) {
4325 		err = -ENOMEM;
4326 		goto err_init_pf_unroll;
4327 	}
4328 
4329 	err = ice_init_interrupt_scheme(pf);
4330 	if (err) {
4331 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4332 		err = -EIO;
4333 		goto err_init_vsi_unroll;
4334 	}
4335 
4336 	/* In case of MSIX we are going to setup the misc vector right here
4337 	 * to handle admin queue events etc. In case of legacy and MSI
4338 	 * the misc functionality and queue processing is combined in
4339 	 * the same vector and that gets setup at open.
4340 	 */
4341 	err = ice_req_irq_msix_misc(pf);
4342 	if (err) {
4343 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4344 		goto err_init_interrupt_unroll;
4345 	}
4346 
4347 	/* create switch struct for the switch element created by FW on boot */
4348 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4349 	if (!pf->first_sw) {
4350 		err = -ENOMEM;
4351 		goto err_msix_misc_unroll;
4352 	}
4353 
4354 	if (hw->evb_veb)
4355 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4356 	else
4357 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4358 
4359 	pf->first_sw->pf = pf;
4360 
4361 	/* record the sw_id available for later use */
4362 	pf->first_sw->sw_id = hw->port_info->sw_id;
4363 
4364 	err = ice_setup_pf_sw(pf);
4365 	if (err) {
4366 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4367 		goto err_alloc_sw_unroll;
4368 	}
4369 
4370 	clear_bit(ICE_SERVICE_DIS, pf->state);
4371 
4372 	/* tell the firmware we are up */
4373 	err = ice_send_version(pf);
4374 	if (err) {
4375 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4376 			UTS_RELEASE, err);
4377 		goto err_send_version_unroll;
4378 	}
4379 
4380 	/* since everything is good, start the service timer */
4381 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4382 
4383 	err = ice_init_link_events(pf->hw.port_info);
4384 	if (err) {
4385 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4386 		goto err_send_version_unroll;
4387 	}
4388 
4389 	/* not a fatal error if this fails */
4390 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4391 	if (err)
4392 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4393 
4394 	/* not a fatal error if this fails */
4395 	err = ice_update_link_info(pf->hw.port_info);
4396 	if (err)
4397 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4398 
4399 	ice_init_link_dflt_override(pf->hw.port_info);
4400 
4401 	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
4402 
4403 	/* if media available, initialize PHY settings */
4404 	if (pf->hw.port_info->phy.link_info.link_info &
4405 	    ICE_AQ_MEDIA_AVAILABLE) {
4406 		/* not a fatal error if this fails */
4407 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4408 		if (err)
4409 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4410 
4411 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4412 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4413 
4414 			if (vsi)
4415 				ice_configure_phy(vsi);
4416 		}
4417 	} else {
4418 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4419 	}
4420 
4421 	ice_verify_cacheline_size(pf);
4422 
4423 	/* Save wakeup reason register for later use */
4424 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4425 
4426 	/* check for a power management event */
4427 	ice_print_wake_reason(pf);
4428 
4429 	/* clear wake status, all bits */
4430 	wr32(hw, PFPM_WUS, U32_MAX);
4431 
4432 	/* Disable WoL at init, wait for user to enable */
4433 	device_set_wakeup_enable(dev, false);
4434 
4435 	if (ice_is_safe_mode(pf)) {
4436 		ice_set_safe_mode_vlan_cfg(pf);
4437 		goto probe_done;
4438 	}
4439 
4440 	/* initialize DDP driven features */
4441 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4442 		ice_ptp_init(pf);
4443 
4444 	/* Note: Flow director init failure is non-fatal to load */
4445 	if (ice_init_fdir(pf))
4446 		dev_err(dev, "could not initialize flow director\n");
4447 
4448 	/* Note: DCB init failure is non-fatal to load */
4449 	if (ice_init_pf_dcb(pf, false)) {
4450 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4451 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4452 	} else {
4453 		ice_cfg_lldp_mib_change(&pf->hw, true);
4454 	}
4455 
4456 	if (ice_init_lag(pf))
4457 		dev_warn(dev, "Failed to init link aggregation support\n");
4458 
4459 	/* print PCI link speed and width */
4460 	pcie_print_link_status(pf->pdev);
4461 
4462 probe_done:
4463 	err = ice_register_netdev(pf);
4464 	if (err)
4465 		goto err_netdev_reg;
4466 
4467 	/* ready to go, so clear down state bit */
4468 	clear_bit(ICE_DOWN, pf->state);
4469 	if (ice_is_aux_ena(pf)) {
4470 		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4471 		if (pf->aux_idx < 0) {
4472 			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4473 			err = -ENOMEM;
4474 			goto err_netdev_reg;
4475 		}
4476 
4477 		err = ice_init_rdma(pf);
4478 		if (err) {
4479 			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4480 			err = -EIO;
4481 			goto err_init_aux_unroll;
4482 		}
4483 	} else {
4484 		dev_warn(dev, "RDMA is not supported on this device\n");
4485 	}
4486 
4487 	return 0;
4488 
4489 err_init_aux_unroll:
4490 	pf->adev = NULL;
4491 	ida_free(&ice_aux_ida, pf->aux_idx);
4492 err_netdev_reg:
4493 err_send_version_unroll:
4494 	ice_vsi_release_all(pf);
4495 err_alloc_sw_unroll:
4496 	set_bit(ICE_SERVICE_DIS, pf->state);
4497 	set_bit(ICE_DOWN, pf->state);
4498 	devm_kfree(dev, pf->first_sw);
4499 err_msix_misc_unroll:
4500 	ice_free_irq_msix_misc(pf);
4501 err_init_interrupt_unroll:
4502 	ice_clear_interrupt_scheme(pf);
4503 err_init_vsi_unroll:
4504 	devm_kfree(dev, pf->vsi);
4505 err_init_pf_unroll:
4506 	ice_deinit_pf(pf);
4507 	ice_devlink_destroy_regions(pf);
4508 	ice_deinit_hw(hw);
4509 err_exit_unroll:
4510 	ice_devlink_unregister(pf);
4511 	pci_disable_pcie_error_reporting(pdev);
4512 	pci_disable_device(pdev);
4513 	return err;
4514 }
4515 
4516 /**
4517  * ice_set_wake - enable or disable Wake on LAN
4518  * @pf: pointer to the PF struct
4519  *
4520  * Simple helper for WoL control
4521  */
4522 static void ice_set_wake(struct ice_pf *pf)
4523 {
4524 	struct ice_hw *hw = &pf->hw;
4525 	bool wol = pf->wol_ena;
4526 
4527 	/* clear wake state, otherwise new wake events won't fire */
4528 	wr32(hw, PFPM_WUS, U32_MAX);
4529 
4530 	/* enable / disable APM wake up, no RMW needed */
4531 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4532 
4533 	/* set magic packet filter enabled */
4534 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4535 }
4536 
4537 /**
4538  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4539  * @pf: pointer to the PF struct
4540  *
4541  * Issue firmware command to enable multicast magic wake, making
4542  * sure that any locally administered address (LAA) is used for
4543  * wake, and that PF reset doesn't undo the LAA.
4544  */
4545 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4546 {
4547 	struct device *dev = ice_pf_to_dev(pf);
4548 	struct ice_hw *hw = &pf->hw;
4549 	enum ice_status status;
4550 	u8 mac_addr[ETH_ALEN];
4551 	struct ice_vsi *vsi;
4552 	u8 flags;
4553 
4554 	if (!pf->wol_ena)
4555 		return;
4556 
4557 	vsi = ice_get_main_vsi(pf);
4558 	if (!vsi)
4559 		return;
4560 
4561 	/* Get current MAC address in case it's an LAA */
4562 	if (vsi->netdev)
4563 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4564 	else
4565 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4566 
4567 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4568 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4569 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4570 
4571 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4572 	if (status)
4573 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4574 			ice_stat_str(status),
4575 			ice_aq_str(hw->adminq.sq_last_status));
4576 }
4577 
4578 /**
4579  * ice_remove - Device removal routine
4580  * @pdev: PCI device information struct
4581  */
4582 static void ice_remove(struct pci_dev *pdev)
4583 {
4584 	struct ice_pf *pf = pci_get_drvdata(pdev);
4585 	int i;
4586 
4587 	if (!pf)
4588 		return;
4589 
4590 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4591 		if (!ice_is_reset_in_progress(pf->state))
4592 			break;
4593 		msleep(100);
4594 	}
4595 
4596 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4597 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4598 		ice_free_vfs(pf);
4599 	}
4600 
4601 	ice_service_task_stop(pf);
4602 
4603 	ice_aq_cancel_waiting_tasks(pf);
4604 	ice_unplug_aux_dev(pf);
4605 	ida_free(&ice_aux_ida, pf->aux_idx);
4606 	set_bit(ICE_DOWN, pf->state);
4607 
4608 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
4609 	ice_deinit_lag(pf);
4610 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4611 		ice_ptp_release(pf);
4612 	if (!ice_is_safe_mode(pf))
4613 		ice_remove_arfs(pf);
4614 	ice_setup_mc_magic_wake(pf);
4615 	ice_vsi_release_all(pf);
4616 	ice_set_wake(pf);
4617 	ice_free_irq_msix_misc(pf);
4618 	ice_for_each_vsi(pf, i) {
4619 		if (!pf->vsi[i])
4620 			continue;
4621 		ice_vsi_free_q_vectors(pf->vsi[i]);
4622 	}
4623 	ice_deinit_pf(pf);
4624 	ice_devlink_destroy_regions(pf);
4625 	ice_deinit_hw(&pf->hw);
4626 	ice_devlink_unregister(pf);
4627 
4628 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4629 	 * do it via ice_schedule_reset() since there is no need to rebuild
4630 	 * and the service task is already stopped.
4631 	 */
4632 	ice_reset(&pf->hw, ICE_RESET_PFR);
4633 	pci_wait_for_pending_transaction(pdev);
4634 	ice_clear_interrupt_scheme(pf);
4635 	pci_disable_pcie_error_reporting(pdev);
4636 	pci_disable_device(pdev);
4637 }
4638 
4639 /**
4640  * ice_shutdown - PCI callback for shutting down device
4641  * @pdev: PCI device information struct
4642  */
4643 static void ice_shutdown(struct pci_dev *pdev)
4644 {
4645 	struct ice_pf *pf = pci_get_drvdata(pdev);
4646 
4647 	ice_remove(pdev);
4648 
4649 	if (system_state == SYSTEM_POWER_OFF) {
4650 		pci_wake_from_d3(pdev, pf->wol_ena);
4651 		pci_set_power_state(pdev, PCI_D3hot);
4652 	}
4653 }
4654 
4655 #ifdef CONFIG_PM
4656 /**
4657  * ice_prepare_for_shutdown - prep for PCI shutdown
4658  * @pf: board private structure
4659  *
4660  * Inform or close all dependent features in prep for PCI device shutdown
4661  */
4662 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4663 {
4664 	struct ice_hw *hw = &pf->hw;
4665 	u32 v;
4666 
4667 	/* Notify VFs of impending reset */
4668 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4669 		ice_vc_notify_reset(pf);
4670 
4671 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4672 
4673 	/* disable the VSIs and their queues that are not already DOWN */
4674 	ice_pf_dis_all_vsi(pf, false);
4675 
4676 	ice_for_each_vsi(pf, v)
4677 		if (pf->vsi[v])
4678 			pf->vsi[v]->vsi_num = 0;
4679 
4680 	ice_shutdown_all_ctrlq(hw);
4681 }
4682 
4683 /**
4684  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4685  * @pf: board private structure to reinitialize
4686  *
4687  * This routine reinitialize interrupt scheme that was cleared during
4688  * power management suspend callback.
4689  *
4690  * This should be called during resume routine to re-allocate the q_vectors
4691  * and reacquire interrupts.
4692  */
4693 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4694 {
4695 	struct device *dev = ice_pf_to_dev(pf);
4696 	int ret, v;
4697 
4698 	/* Since we clear MSIX flag during suspend, we need to
4699 	 * set it back during resume...
4700 	 */
4701 
4702 	ret = ice_init_interrupt_scheme(pf);
4703 	if (ret) {
4704 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4705 		return ret;
4706 	}
4707 
4708 	/* Remap vectors and rings, after successful re-init interrupts */
4709 	ice_for_each_vsi(pf, v) {
4710 		if (!pf->vsi[v])
4711 			continue;
4712 
4713 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4714 		if (ret)
4715 			goto err_reinit;
4716 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4717 	}
4718 
4719 	ret = ice_req_irq_msix_misc(pf);
4720 	if (ret) {
4721 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4722 			ret);
4723 		goto err_reinit;
4724 	}
4725 
4726 	return 0;
4727 
4728 err_reinit:
4729 	while (v--)
4730 		if (pf->vsi[v])
4731 			ice_vsi_free_q_vectors(pf->vsi[v]);
4732 
4733 	return ret;
4734 }
4735 
4736 /**
4737  * ice_suspend
4738  * @dev: generic device information structure
4739  *
4740  * Power Management callback to quiesce the device and prepare
4741  * for D3 transition.
4742  */
4743 static int __maybe_unused ice_suspend(struct device *dev)
4744 {
4745 	struct pci_dev *pdev = to_pci_dev(dev);
4746 	struct ice_pf *pf;
4747 	int disabled, v;
4748 
4749 	pf = pci_get_drvdata(pdev);
4750 
4751 	if (!ice_pf_state_is_nominal(pf)) {
4752 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4753 		return -EBUSY;
4754 	}
4755 
4756 	/* Stop watchdog tasks until resume completion.
4757 	 * Even though it is most likely that the service task is
4758 	 * disabled if the device is suspended or down, the service task's
4759 	 * state is controlled by a different state bit, and we should
4760 	 * store and honor whatever state that bit is in at this point.
4761 	 */
4762 	disabled = ice_service_task_stop(pf);
4763 
4764 	ice_unplug_aux_dev(pf);
4765 
4766 	/* Already suspended?, then there is nothing to do */
4767 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4768 		if (!disabled)
4769 			ice_service_task_restart(pf);
4770 		return 0;
4771 	}
4772 
4773 	if (test_bit(ICE_DOWN, pf->state) ||
4774 	    ice_is_reset_in_progress(pf->state)) {
4775 		dev_err(dev, "can't suspend device in reset or already down\n");
4776 		if (!disabled)
4777 			ice_service_task_restart(pf);
4778 		return 0;
4779 	}
4780 
4781 	ice_setup_mc_magic_wake(pf);
4782 
4783 	ice_prepare_for_shutdown(pf);
4784 
4785 	ice_set_wake(pf);
4786 
4787 	/* Free vectors, clear the interrupt scheme and release IRQs
4788 	 * for proper hibernation, especially with large number of CPUs.
4789 	 * Otherwise hibernation might fail when mapping all the vectors back
4790 	 * to CPU0.
4791 	 */
4792 	ice_free_irq_msix_misc(pf);
4793 	ice_for_each_vsi(pf, v) {
4794 		if (!pf->vsi[v])
4795 			continue;
4796 		ice_vsi_free_q_vectors(pf->vsi[v]);
4797 	}
4798 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4799 	ice_clear_interrupt_scheme(pf);
4800 
4801 	pci_save_state(pdev);
4802 	pci_wake_from_d3(pdev, pf->wol_ena);
4803 	pci_set_power_state(pdev, PCI_D3hot);
4804 	return 0;
4805 }
4806 
4807 /**
4808  * ice_resume - PM callback for waking up from D3
4809  * @dev: generic device information structure
4810  */
4811 static int __maybe_unused ice_resume(struct device *dev)
4812 {
4813 	struct pci_dev *pdev = to_pci_dev(dev);
4814 	enum ice_reset_req reset_type;
4815 	struct ice_pf *pf;
4816 	struct ice_hw *hw;
4817 	int ret;
4818 
4819 	pci_set_power_state(pdev, PCI_D0);
4820 	pci_restore_state(pdev);
4821 	pci_save_state(pdev);
4822 
4823 	if (!pci_device_is_present(pdev))
4824 		return -ENODEV;
4825 
4826 	ret = pci_enable_device_mem(pdev);
4827 	if (ret) {
4828 		dev_err(dev, "Cannot enable device after suspend\n");
4829 		return ret;
4830 	}
4831 
4832 	pf = pci_get_drvdata(pdev);
4833 	hw = &pf->hw;
4834 
4835 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4836 	ice_print_wake_reason(pf);
4837 
4838 	/* We cleared the interrupt scheme when we suspended, so we need to
4839 	 * restore it now to resume device functionality.
4840 	 */
4841 	ret = ice_reinit_interrupt_scheme(pf);
4842 	if (ret)
4843 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4844 
4845 	clear_bit(ICE_DOWN, pf->state);
4846 	/* Now perform PF reset and rebuild */
4847 	reset_type = ICE_RESET_PFR;
4848 	/* re-enable service task for reset, but allow reset to schedule it */
4849 	clear_bit(ICE_SERVICE_DIS, pf->state);
4850 
4851 	if (ice_schedule_reset(pf, reset_type))
4852 		dev_err(dev, "Reset during resume failed.\n");
4853 
4854 	clear_bit(ICE_SUSPENDED, pf->state);
4855 	ice_service_task_restart(pf);
4856 
4857 	/* Restart the service task */
4858 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4859 
4860 	return 0;
4861 }
4862 #endif /* CONFIG_PM */
4863 
4864 /**
4865  * ice_pci_err_detected - warning that PCI error has been detected
4866  * @pdev: PCI device information struct
4867  * @err: the type of PCI error
4868  *
4869  * Called to warn that something happened on the PCI bus and the error handling
4870  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4871  */
4872 static pci_ers_result_t
4873 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4874 {
4875 	struct ice_pf *pf = pci_get_drvdata(pdev);
4876 
4877 	if (!pf) {
4878 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4879 			__func__, err);
4880 		return PCI_ERS_RESULT_DISCONNECT;
4881 	}
4882 
4883 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4884 		ice_service_task_stop(pf);
4885 
4886 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4887 			set_bit(ICE_PFR_REQ, pf->state);
4888 			ice_prepare_for_reset(pf);
4889 		}
4890 	}
4891 
4892 	return PCI_ERS_RESULT_NEED_RESET;
4893 }
4894 
4895 /**
4896  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4897  * @pdev: PCI device information struct
4898  *
4899  * Called to determine if the driver can recover from the PCI slot reset by
4900  * using a register read to determine if the device is recoverable.
4901  */
4902 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4903 {
4904 	struct ice_pf *pf = pci_get_drvdata(pdev);
4905 	pci_ers_result_t result;
4906 	int err;
4907 	u32 reg;
4908 
4909 	err = pci_enable_device_mem(pdev);
4910 	if (err) {
4911 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4912 			err);
4913 		result = PCI_ERS_RESULT_DISCONNECT;
4914 	} else {
4915 		pci_set_master(pdev);
4916 		pci_restore_state(pdev);
4917 		pci_save_state(pdev);
4918 		pci_wake_from_d3(pdev, false);
4919 
4920 		/* Check for life */
4921 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4922 		if (!reg)
4923 			result = PCI_ERS_RESULT_RECOVERED;
4924 		else
4925 			result = PCI_ERS_RESULT_DISCONNECT;
4926 	}
4927 
4928 	err = pci_aer_clear_nonfatal_status(pdev);
4929 	if (err)
4930 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4931 			err);
4932 		/* non-fatal, continue */
4933 
4934 	return result;
4935 }
4936 
4937 /**
4938  * ice_pci_err_resume - restart operations after PCI error recovery
4939  * @pdev: PCI device information struct
4940  *
4941  * Called to allow the driver to bring things back up after PCI error and/or
4942  * reset recovery have finished
4943  */
4944 static void ice_pci_err_resume(struct pci_dev *pdev)
4945 {
4946 	struct ice_pf *pf = pci_get_drvdata(pdev);
4947 
4948 	if (!pf) {
4949 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4950 			__func__);
4951 		return;
4952 	}
4953 
4954 	if (test_bit(ICE_SUSPENDED, pf->state)) {
4955 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4956 			__func__);
4957 		return;
4958 	}
4959 
4960 	ice_restore_all_vfs_msi_state(pdev);
4961 
4962 	ice_do_reset(pf, ICE_RESET_PFR);
4963 	ice_service_task_restart(pf);
4964 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4965 }
4966 
4967 /**
4968  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4969  * @pdev: PCI device information struct
4970  */
4971 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4972 {
4973 	struct ice_pf *pf = pci_get_drvdata(pdev);
4974 
4975 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4976 		ice_service_task_stop(pf);
4977 
4978 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4979 			set_bit(ICE_PFR_REQ, pf->state);
4980 			ice_prepare_for_reset(pf);
4981 		}
4982 	}
4983 }
4984 
4985 /**
4986  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
4987  * @pdev: PCI device information struct
4988  */
4989 static void ice_pci_err_reset_done(struct pci_dev *pdev)
4990 {
4991 	ice_pci_err_resume(pdev);
4992 }
4993 
4994 /* ice_pci_tbl - PCI Device ID Table
4995  *
4996  * Wildcard entries (PCI_ANY_ID) should come last
4997  * Last entry must be all 0s
4998  *
4999  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5000  *   Class, Class Mask, private data (not used) }
5001  */
5002 static const struct pci_device_id ice_pci_tbl[] = {
5003 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5004 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5005 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5006 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5007 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5008 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5009 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5010 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5011 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5012 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5013 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5014 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5015 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5016 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5017 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5018 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5019 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5020 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5021 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5022 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5023 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5024 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5025 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5026 	/* required last entry */
5027 	{ 0, }
5028 };
5029 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5030 
5031 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5032 
5033 static const struct pci_error_handlers ice_pci_err_handler = {
5034 	.error_detected = ice_pci_err_detected,
5035 	.slot_reset = ice_pci_err_slot_reset,
5036 	.reset_prepare = ice_pci_err_reset_prepare,
5037 	.reset_done = ice_pci_err_reset_done,
5038 	.resume = ice_pci_err_resume
5039 };
5040 
5041 static struct pci_driver ice_driver = {
5042 	.name = KBUILD_MODNAME,
5043 	.id_table = ice_pci_tbl,
5044 	.probe = ice_probe,
5045 	.remove = ice_remove,
5046 #ifdef CONFIG_PM
5047 	.driver.pm = &ice_pm_ops,
5048 #endif /* CONFIG_PM */
5049 	.shutdown = ice_shutdown,
5050 	.sriov_configure = ice_sriov_configure,
5051 	.err_handler = &ice_pci_err_handler
5052 };
5053 
5054 /**
5055  * ice_module_init - Driver registration routine
5056  *
5057  * ice_module_init is the first routine called when the driver is
5058  * loaded. All it does is register with the PCI subsystem.
5059  */
5060 static int __init ice_module_init(void)
5061 {
5062 	int status;
5063 
5064 	pr_info("%s\n", ice_driver_string);
5065 	pr_info("%s\n", ice_copyright);
5066 
5067 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5068 	if (!ice_wq) {
5069 		pr_err("Failed to create workqueue\n");
5070 		return -ENOMEM;
5071 	}
5072 
5073 	status = pci_register_driver(&ice_driver);
5074 	if (status) {
5075 		pr_err("failed to register PCI driver, err %d\n", status);
5076 		destroy_workqueue(ice_wq);
5077 	}
5078 
5079 	return status;
5080 }
5081 module_init(ice_module_init);
5082 
5083 /**
5084  * ice_module_exit - Driver exit cleanup routine
5085  *
5086  * ice_module_exit is called just before the driver is removed
5087  * from memory.
5088  */
5089 static void __exit ice_module_exit(void)
5090 {
5091 	pci_unregister_driver(&ice_driver);
5092 	destroy_workqueue(ice_wq);
5093 	pr_info("module unloaded\n");
5094 }
5095 module_exit(ice_module_exit);
5096 
5097 /**
5098  * ice_set_mac_address - NDO callback to set MAC address
5099  * @netdev: network interface device structure
5100  * @pi: pointer to an address structure
5101  *
5102  * Returns 0 on success, negative on failure
5103  */
5104 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5105 {
5106 	struct ice_netdev_priv *np = netdev_priv(netdev);
5107 	struct ice_vsi *vsi = np->vsi;
5108 	struct ice_pf *pf = vsi->back;
5109 	struct ice_hw *hw = &pf->hw;
5110 	struct sockaddr *addr = pi;
5111 	enum ice_status status;
5112 	u8 flags = 0;
5113 	int err = 0;
5114 	u8 *mac;
5115 
5116 	mac = (u8 *)addr->sa_data;
5117 
5118 	if (!is_valid_ether_addr(mac))
5119 		return -EADDRNOTAVAIL;
5120 
5121 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5122 		netdev_warn(netdev, "already using mac %pM\n", mac);
5123 		return 0;
5124 	}
5125 
5126 	if (test_bit(ICE_DOWN, pf->state) ||
5127 	    ice_is_reset_in_progress(pf->state)) {
5128 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5129 			   mac);
5130 		return -EBUSY;
5131 	}
5132 
5133 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5134 	status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
5135 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5136 		err = -EADDRNOTAVAIL;
5137 		goto err_update_filters;
5138 	}
5139 
5140 	/* Add filter for new MAC. If filter exists, return success */
5141 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5142 	if (status == ICE_ERR_ALREADY_EXISTS) {
5143 		/* Although this MAC filter is already present in hardware it's
5144 		 * possible in some cases (e.g. bonding) that dev_addr was
5145 		 * modified outside of the driver and needs to be restored back
5146 		 * to this value.
5147 		 */
5148 		memcpy(netdev->dev_addr, mac, netdev->addr_len);
5149 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5150 		return 0;
5151 	}
5152 
5153 	/* error if the new filter addition failed */
5154 	if (status)
5155 		err = -EADDRNOTAVAIL;
5156 
5157 err_update_filters:
5158 	if (err) {
5159 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5160 			   mac);
5161 		return err;
5162 	}
5163 
5164 	/* change the netdev's MAC address */
5165 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
5166 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5167 		   netdev->dev_addr);
5168 
5169 	/* write new MAC address to the firmware */
5170 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5171 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5172 	if (status) {
5173 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
5174 			   mac, ice_stat_str(status));
5175 	}
5176 	return 0;
5177 }
5178 
5179 /**
5180  * ice_set_rx_mode - NDO callback to set the netdev filters
5181  * @netdev: network interface device structure
5182  */
5183 static void ice_set_rx_mode(struct net_device *netdev)
5184 {
5185 	struct ice_netdev_priv *np = netdev_priv(netdev);
5186 	struct ice_vsi *vsi = np->vsi;
5187 
5188 	if (!vsi)
5189 		return;
5190 
5191 	/* Set the flags to synchronize filters
5192 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5193 	 * flags
5194 	 */
5195 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5196 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5197 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5198 
5199 	/* schedule our worker thread which will take care of
5200 	 * applying the new filter changes
5201 	 */
5202 	ice_service_task_schedule(vsi->back);
5203 }
5204 
5205 /**
5206  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5207  * @netdev: network interface device structure
5208  * @queue_index: Queue ID
5209  * @maxrate: maximum bandwidth in Mbps
5210  */
5211 static int
5212 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5213 {
5214 	struct ice_netdev_priv *np = netdev_priv(netdev);
5215 	struct ice_vsi *vsi = np->vsi;
5216 	enum ice_status status;
5217 	u16 q_handle;
5218 	u8 tc;
5219 
5220 	/* Validate maxrate requested is within permitted range */
5221 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5222 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5223 			   maxrate, queue_index);
5224 		return -EINVAL;
5225 	}
5226 
5227 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5228 	tc = ice_dcb_get_tc(vsi, queue_index);
5229 
5230 	/* Set BW back to default, when user set maxrate to 0 */
5231 	if (!maxrate)
5232 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5233 					       q_handle, ICE_MAX_BW);
5234 	else
5235 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5236 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5237 	if (status) {
5238 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5239 			   ice_stat_str(status));
5240 		return -EIO;
5241 	}
5242 
5243 	return 0;
5244 }
5245 
5246 /**
5247  * ice_fdb_add - add an entry to the hardware database
5248  * @ndm: the input from the stack
5249  * @tb: pointer to array of nladdr (unused)
5250  * @dev: the net device pointer
5251  * @addr: the MAC address entry being added
5252  * @vid: VLAN ID
5253  * @flags: instructions from stack about fdb operation
5254  * @extack: netlink extended ack
5255  */
5256 static int
5257 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5258 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5259 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5260 {
5261 	int err;
5262 
5263 	if (vid) {
5264 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5265 		return -EINVAL;
5266 	}
5267 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5268 		netdev_err(dev, "FDB only supports static addresses\n");
5269 		return -EINVAL;
5270 	}
5271 
5272 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5273 		err = dev_uc_add_excl(dev, addr);
5274 	else if (is_multicast_ether_addr(addr))
5275 		err = dev_mc_add_excl(dev, addr);
5276 	else
5277 		err = -EINVAL;
5278 
5279 	/* Only return duplicate errors if NLM_F_EXCL is set */
5280 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5281 		err = 0;
5282 
5283 	return err;
5284 }
5285 
5286 /**
5287  * ice_fdb_del - delete an entry from the hardware database
5288  * @ndm: the input from the stack
5289  * @tb: pointer to array of nladdr (unused)
5290  * @dev: the net device pointer
5291  * @addr: the MAC address entry being added
5292  * @vid: VLAN ID
5293  */
5294 static int
5295 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5296 	    struct net_device *dev, const unsigned char *addr,
5297 	    __always_unused u16 vid)
5298 {
5299 	int err;
5300 
5301 	if (ndm->ndm_state & NUD_PERMANENT) {
5302 		netdev_err(dev, "FDB only supports static addresses\n");
5303 		return -EINVAL;
5304 	}
5305 
5306 	if (is_unicast_ether_addr(addr))
5307 		err = dev_uc_del(dev, addr);
5308 	else if (is_multicast_ether_addr(addr))
5309 		err = dev_mc_del(dev, addr);
5310 	else
5311 		err = -EINVAL;
5312 
5313 	return err;
5314 }
5315 
5316 /**
5317  * ice_set_features - set the netdev feature flags
5318  * @netdev: ptr to the netdev being adjusted
5319  * @features: the feature set that the stack is suggesting
5320  */
5321 static int
5322 ice_set_features(struct net_device *netdev, netdev_features_t features)
5323 {
5324 	struct ice_netdev_priv *np = netdev_priv(netdev);
5325 	struct ice_vsi *vsi = np->vsi;
5326 	struct ice_pf *pf = vsi->back;
5327 	int ret = 0;
5328 
5329 	/* Don't set any netdev advanced features with device in Safe Mode */
5330 	if (ice_is_safe_mode(vsi->back)) {
5331 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5332 		return ret;
5333 	}
5334 
5335 	/* Do not change setting during reset */
5336 	if (ice_is_reset_in_progress(pf->state)) {
5337 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5338 		return -EBUSY;
5339 	}
5340 
5341 	/* Multiple features can be changed in one call so keep features in
5342 	 * separate if/else statements to guarantee each feature is checked
5343 	 */
5344 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5345 		ice_vsi_manage_rss_lut(vsi, true);
5346 	else if (!(features & NETIF_F_RXHASH) &&
5347 		 netdev->features & NETIF_F_RXHASH)
5348 		ice_vsi_manage_rss_lut(vsi, false);
5349 
5350 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5351 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5352 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5353 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5354 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5355 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5356 
5357 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5358 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5359 		ret = ice_vsi_manage_vlan_insertion(vsi);
5360 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5361 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5362 		ret = ice_vsi_manage_vlan_insertion(vsi);
5363 
5364 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5365 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5366 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5367 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5368 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5369 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5370 
5371 	if ((features & NETIF_F_NTUPLE) &&
5372 	    !(netdev->features & NETIF_F_NTUPLE)) {
5373 		ice_vsi_manage_fdir(vsi, true);
5374 		ice_init_arfs(vsi);
5375 	} else if (!(features & NETIF_F_NTUPLE) &&
5376 		 (netdev->features & NETIF_F_NTUPLE)) {
5377 		ice_vsi_manage_fdir(vsi, false);
5378 		ice_clear_arfs(vsi);
5379 	}
5380 
5381 	return ret;
5382 }
5383 
5384 /**
5385  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5386  * @vsi: VSI to setup VLAN properties for
5387  */
5388 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5389 {
5390 	int ret = 0;
5391 
5392 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5393 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5394 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5395 		ret = ice_vsi_manage_vlan_insertion(vsi);
5396 
5397 	return ret;
5398 }
5399 
5400 /**
5401  * ice_vsi_cfg - Setup the VSI
5402  * @vsi: the VSI being configured
5403  *
5404  * Return 0 on success and negative value on error
5405  */
5406 int ice_vsi_cfg(struct ice_vsi *vsi)
5407 {
5408 	int err;
5409 
5410 	if (vsi->netdev) {
5411 		ice_set_rx_mode(vsi->netdev);
5412 
5413 		err = ice_vsi_vlan_setup(vsi);
5414 
5415 		if (err)
5416 			return err;
5417 	}
5418 	ice_vsi_cfg_dcb_rings(vsi);
5419 
5420 	err = ice_vsi_cfg_lan_txqs(vsi);
5421 	if (!err && ice_is_xdp_ena_vsi(vsi))
5422 		err = ice_vsi_cfg_xdp_txqs(vsi);
5423 	if (!err)
5424 		err = ice_vsi_cfg_rxqs(vsi);
5425 
5426 	return err;
5427 }
5428 
5429 /* THEORY OF MODERATION:
5430  * The below code creates custom DIM profiles for use by this driver, because
5431  * the ice driver hardware works differently than the hardware that DIMLIB was
5432  * originally made for. ice hardware doesn't have packet count limits that
5433  * can trigger an interrupt, but it *does* have interrupt rate limit support,
5434  * and this code adds that capability to be used by the driver when it's using
5435  * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
5436  * for how to "respond" to traffic and interrupts, so this driver uses a
5437  * slightly different set of moderation parameters to get best performance.
5438  */
5439 struct ice_dim {
5440 	/* the throttle rate for interrupts, basically worst case delay before
5441 	 * an initial interrupt fires, value is stored in microseconds.
5442 	 */
5443 	u16 itr;
5444 	/* the rate limit for interrupts, which can cap a delay from a small
5445 	 * ITR at a certain amount of interrupts per second. f.e. a 2us ITR
5446 	 * could yield as much as 500,000 interrupts per second, but with a
5447 	 * 10us rate limit, it limits to 100,000 interrupts per second. Value
5448 	 * is stored in microseconds.
5449 	 */
5450 	u16 intrl;
5451 };
5452 
5453 /* Make a different profile for Rx that doesn't allow quite so aggressive
5454  * moderation at the high end (it maxes out at 128us or about 8k interrupts a
5455  * second. The INTRL/rate parameters here are only useful to cap small ITR
5456  * values, which is why for larger ITR's - like 128, which can only generate
5457  * 8k interrupts per second, there is no point to rate limit and the values
5458  * are set to zero. The rate limit values do affect latency, and so must
5459  * be reasonably small so to not impact latency sensitive tests.
5460  */
5461 static const struct ice_dim rx_profile[] = {
5462 	{2, 10},
5463 	{8, 16},
5464 	{32, 0},
5465 	{96, 0},
5466 	{128, 0}
5467 };
5468 
5469 /* The transmit profile, which has the same sorts of values
5470  * as the previous struct
5471  */
5472 static const struct ice_dim tx_profile[] = {
5473 	{2, 10},
5474 	{8, 16},
5475 	{64, 0},
5476 	{128, 0},
5477 	{256, 0}
5478 };
5479 
5480 static void ice_tx_dim_work(struct work_struct *work)
5481 {
5482 	struct ice_ring_container *rc;
5483 	struct ice_q_vector *q_vector;
5484 	struct dim *dim;
5485 	u16 itr, intrl;
5486 
5487 	dim = container_of(work, struct dim, work);
5488 	rc = container_of(dim, struct ice_ring_container, dim);
5489 	q_vector = container_of(rc, struct ice_q_vector, tx);
5490 
5491 	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5492 		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5493 
5494 	/* look up the values in our local table */
5495 	itr = tx_profile[dim->profile_ix].itr;
5496 	intrl = tx_profile[dim->profile_ix].intrl;
5497 
5498 	ice_trace(tx_dim_work, q_vector, dim);
5499 	ice_write_itr(rc, itr);
5500 	ice_write_intrl(q_vector, intrl);
5501 
5502 	dim->state = DIM_START_MEASURE;
5503 }
5504 
5505 static void ice_rx_dim_work(struct work_struct *work)
5506 {
5507 	struct ice_ring_container *rc;
5508 	struct ice_q_vector *q_vector;
5509 	struct dim *dim;
5510 	u16 itr, intrl;
5511 
5512 	dim = container_of(work, struct dim, work);
5513 	rc = container_of(dim, struct ice_ring_container, dim);
5514 	q_vector = container_of(rc, struct ice_q_vector, rx);
5515 
5516 	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5517 		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5518 
5519 	/* look up the values in our local table */
5520 	itr = rx_profile[dim->profile_ix].itr;
5521 	intrl = rx_profile[dim->profile_ix].intrl;
5522 
5523 	ice_trace(rx_dim_work, q_vector, dim);
5524 	ice_write_itr(rc, itr);
5525 	ice_write_intrl(q_vector, intrl);
5526 
5527 	dim->state = DIM_START_MEASURE;
5528 }
5529 
5530 /**
5531  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5532  * @vsi: the VSI being configured
5533  */
5534 static void ice_napi_enable_all(struct ice_vsi *vsi)
5535 {
5536 	int q_idx;
5537 
5538 	if (!vsi->netdev)
5539 		return;
5540 
5541 	ice_for_each_q_vector(vsi, q_idx) {
5542 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5543 
5544 		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5545 		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5546 
5547 		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5548 		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5549 
5550 		if (q_vector->rx.ring || q_vector->tx.ring)
5551 			napi_enable(&q_vector->napi);
5552 	}
5553 }
5554 
5555 /**
5556  * ice_up_complete - Finish the last steps of bringing up a connection
5557  * @vsi: The VSI being configured
5558  *
5559  * Return 0 on success and negative value on error
5560  */
5561 static int ice_up_complete(struct ice_vsi *vsi)
5562 {
5563 	struct ice_pf *pf = vsi->back;
5564 	int err;
5565 
5566 	ice_vsi_cfg_msix(vsi);
5567 
5568 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5569 	 * Tx queue group list was configured and the context bits were
5570 	 * programmed using ice_vsi_cfg_txqs
5571 	 */
5572 	err = ice_vsi_start_all_rx_rings(vsi);
5573 	if (err)
5574 		return err;
5575 
5576 	clear_bit(ICE_VSI_DOWN, vsi->state);
5577 	ice_napi_enable_all(vsi);
5578 	ice_vsi_ena_irq(vsi);
5579 
5580 	if (vsi->port_info &&
5581 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5582 	    vsi->netdev) {
5583 		ice_print_link_msg(vsi, true);
5584 		netif_tx_start_all_queues(vsi->netdev);
5585 		netif_carrier_on(vsi->netdev);
5586 	}
5587 
5588 	ice_service_task_schedule(pf);
5589 
5590 	return 0;
5591 }
5592 
5593 /**
5594  * ice_up - Bring the connection back up after being down
5595  * @vsi: VSI being configured
5596  */
5597 int ice_up(struct ice_vsi *vsi)
5598 {
5599 	int err;
5600 
5601 	err = ice_vsi_cfg(vsi);
5602 	if (!err)
5603 		err = ice_up_complete(vsi);
5604 
5605 	return err;
5606 }
5607 
5608 /**
5609  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5610  * @ring: Tx or Rx ring to read stats from
5611  * @pkts: packets stats counter
5612  * @bytes: bytes stats counter
5613  *
5614  * This function fetches stats from the ring considering the atomic operations
5615  * that needs to be performed to read u64 values in 32 bit machine.
5616  */
5617 static void
5618 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5619 {
5620 	unsigned int start;
5621 	*pkts = 0;
5622 	*bytes = 0;
5623 
5624 	if (!ring)
5625 		return;
5626 	do {
5627 		start = u64_stats_fetch_begin_irq(&ring->syncp);
5628 		*pkts = ring->stats.pkts;
5629 		*bytes = ring->stats.bytes;
5630 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5631 }
5632 
5633 /**
5634  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5635  * @vsi: the VSI to be updated
5636  * @rings: rings to work on
5637  * @count: number of rings
5638  */
5639 static void
5640 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5641 			     u16 count)
5642 {
5643 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5644 	u16 i;
5645 
5646 	for (i = 0; i < count; i++) {
5647 		struct ice_ring *ring;
5648 		u64 pkts, bytes;
5649 
5650 		ring = READ_ONCE(rings[i]);
5651 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5652 		vsi_stats->tx_packets += pkts;
5653 		vsi_stats->tx_bytes += bytes;
5654 		vsi->tx_restart += ring->tx_stats.restart_q;
5655 		vsi->tx_busy += ring->tx_stats.tx_busy;
5656 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5657 	}
5658 }
5659 
5660 /**
5661  * ice_update_vsi_ring_stats - Update VSI stats counters
5662  * @vsi: the VSI to be updated
5663  */
5664 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5665 {
5666 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5667 	u64 pkts, bytes;
5668 	int i;
5669 
5670 	/* reset netdev stats */
5671 	vsi_stats->tx_packets = 0;
5672 	vsi_stats->tx_bytes = 0;
5673 	vsi_stats->rx_packets = 0;
5674 	vsi_stats->rx_bytes = 0;
5675 
5676 	/* reset non-netdev (extended) stats */
5677 	vsi->tx_restart = 0;
5678 	vsi->tx_busy = 0;
5679 	vsi->tx_linearize = 0;
5680 	vsi->rx_buf_failed = 0;
5681 	vsi->rx_page_failed = 0;
5682 
5683 	rcu_read_lock();
5684 
5685 	/* update Tx rings counters */
5686 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5687 
5688 	/* update Rx rings counters */
5689 	ice_for_each_rxq(vsi, i) {
5690 		struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
5691 
5692 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5693 		vsi_stats->rx_packets += pkts;
5694 		vsi_stats->rx_bytes += bytes;
5695 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5696 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5697 	}
5698 
5699 	/* update XDP Tx rings counters */
5700 	if (ice_is_xdp_ena_vsi(vsi))
5701 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5702 					     vsi->num_xdp_txq);
5703 
5704 	rcu_read_unlock();
5705 }
5706 
5707 /**
5708  * ice_update_vsi_stats - Update VSI stats counters
5709  * @vsi: the VSI to be updated
5710  */
5711 void ice_update_vsi_stats(struct ice_vsi *vsi)
5712 {
5713 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5714 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5715 	struct ice_pf *pf = vsi->back;
5716 
5717 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5718 	    test_bit(ICE_CFG_BUSY, pf->state))
5719 		return;
5720 
5721 	/* get stats as recorded by Tx/Rx rings */
5722 	ice_update_vsi_ring_stats(vsi);
5723 
5724 	/* get VSI stats as recorded by the hardware */
5725 	ice_update_eth_stats(vsi);
5726 
5727 	cur_ns->tx_errors = cur_es->tx_errors;
5728 	cur_ns->rx_dropped = cur_es->rx_discards;
5729 	cur_ns->tx_dropped = cur_es->tx_discards;
5730 	cur_ns->multicast = cur_es->rx_multicast;
5731 
5732 	/* update some more netdev stats if this is main VSI */
5733 	if (vsi->type == ICE_VSI_PF) {
5734 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5735 		cur_ns->rx_errors = pf->stats.crc_errors +
5736 				    pf->stats.illegal_bytes +
5737 				    pf->stats.rx_len_errors +
5738 				    pf->stats.rx_undersize +
5739 				    pf->hw_csum_rx_error +
5740 				    pf->stats.rx_jabber +
5741 				    pf->stats.rx_fragments +
5742 				    pf->stats.rx_oversize;
5743 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5744 		/* record drops from the port level */
5745 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5746 	}
5747 }
5748 
5749 /**
5750  * ice_update_pf_stats - Update PF port stats counters
5751  * @pf: PF whose stats needs to be updated
5752  */
5753 void ice_update_pf_stats(struct ice_pf *pf)
5754 {
5755 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5756 	struct ice_hw *hw = &pf->hw;
5757 	u16 fd_ctr_base;
5758 	u8 port;
5759 
5760 	port = hw->port_info->lport;
5761 	prev_ps = &pf->stats_prev;
5762 	cur_ps = &pf->stats;
5763 
5764 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5765 			  &prev_ps->eth.rx_bytes,
5766 			  &cur_ps->eth.rx_bytes);
5767 
5768 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5769 			  &prev_ps->eth.rx_unicast,
5770 			  &cur_ps->eth.rx_unicast);
5771 
5772 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5773 			  &prev_ps->eth.rx_multicast,
5774 			  &cur_ps->eth.rx_multicast);
5775 
5776 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5777 			  &prev_ps->eth.rx_broadcast,
5778 			  &cur_ps->eth.rx_broadcast);
5779 
5780 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5781 			  &prev_ps->eth.rx_discards,
5782 			  &cur_ps->eth.rx_discards);
5783 
5784 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5785 			  &prev_ps->eth.tx_bytes,
5786 			  &cur_ps->eth.tx_bytes);
5787 
5788 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5789 			  &prev_ps->eth.tx_unicast,
5790 			  &cur_ps->eth.tx_unicast);
5791 
5792 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5793 			  &prev_ps->eth.tx_multicast,
5794 			  &cur_ps->eth.tx_multicast);
5795 
5796 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5797 			  &prev_ps->eth.tx_broadcast,
5798 			  &cur_ps->eth.tx_broadcast);
5799 
5800 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5801 			  &prev_ps->tx_dropped_link_down,
5802 			  &cur_ps->tx_dropped_link_down);
5803 
5804 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5805 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5806 
5807 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5808 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5809 
5810 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5811 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5812 
5813 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5814 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5815 
5816 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5817 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5818 
5819 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5820 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5821 
5822 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5823 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5824 
5825 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5826 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5827 
5828 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5829 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5830 
5831 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5832 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5833 
5834 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5835 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5836 
5837 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5838 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5839 
5840 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5841 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5842 
5843 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5844 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5845 
5846 	fd_ctr_base = hw->fd_ctr_base;
5847 
5848 	ice_stat_update40(hw,
5849 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5850 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5851 			  &cur_ps->fd_sb_match);
5852 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5853 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5854 
5855 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5856 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5857 
5858 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5859 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5860 
5861 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5862 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5863 
5864 	ice_update_dcb_stats(pf);
5865 
5866 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5867 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5868 
5869 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5870 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5871 
5872 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5873 			  &prev_ps->mac_local_faults,
5874 			  &cur_ps->mac_local_faults);
5875 
5876 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5877 			  &prev_ps->mac_remote_faults,
5878 			  &cur_ps->mac_remote_faults);
5879 
5880 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5881 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5882 
5883 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5884 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5885 
5886 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5887 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5888 
5889 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5890 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5891 
5892 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5893 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5894 
5895 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5896 
5897 	pf->stat_prev_loaded = true;
5898 }
5899 
5900 /**
5901  * ice_get_stats64 - get statistics for network device structure
5902  * @netdev: network interface device structure
5903  * @stats: main device statistics structure
5904  */
5905 static
5906 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5907 {
5908 	struct ice_netdev_priv *np = netdev_priv(netdev);
5909 	struct rtnl_link_stats64 *vsi_stats;
5910 	struct ice_vsi *vsi = np->vsi;
5911 
5912 	vsi_stats = &vsi->net_stats;
5913 
5914 	if (!vsi->num_txq || !vsi->num_rxq)
5915 		return;
5916 
5917 	/* netdev packet/byte stats come from ring counter. These are obtained
5918 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
5919 	 * But, only call the update routine and read the registers if VSI is
5920 	 * not down.
5921 	 */
5922 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
5923 		ice_update_vsi_ring_stats(vsi);
5924 	stats->tx_packets = vsi_stats->tx_packets;
5925 	stats->tx_bytes = vsi_stats->tx_bytes;
5926 	stats->rx_packets = vsi_stats->rx_packets;
5927 	stats->rx_bytes = vsi_stats->rx_bytes;
5928 
5929 	/* The rest of the stats can be read from the hardware but instead we
5930 	 * just return values that the watchdog task has already obtained from
5931 	 * the hardware.
5932 	 */
5933 	stats->multicast = vsi_stats->multicast;
5934 	stats->tx_errors = vsi_stats->tx_errors;
5935 	stats->tx_dropped = vsi_stats->tx_dropped;
5936 	stats->rx_errors = vsi_stats->rx_errors;
5937 	stats->rx_dropped = vsi_stats->rx_dropped;
5938 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5939 	stats->rx_length_errors = vsi_stats->rx_length_errors;
5940 }
5941 
5942 /**
5943  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5944  * @vsi: VSI having NAPI disabled
5945  */
5946 static void ice_napi_disable_all(struct ice_vsi *vsi)
5947 {
5948 	int q_idx;
5949 
5950 	if (!vsi->netdev)
5951 		return;
5952 
5953 	ice_for_each_q_vector(vsi, q_idx) {
5954 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5955 
5956 		if (q_vector->rx.ring || q_vector->tx.ring)
5957 			napi_disable(&q_vector->napi);
5958 
5959 		cancel_work_sync(&q_vector->tx.dim.work);
5960 		cancel_work_sync(&q_vector->rx.dim.work);
5961 	}
5962 }
5963 
5964 /**
5965  * ice_down - Shutdown the connection
5966  * @vsi: The VSI being stopped
5967  */
5968 int ice_down(struct ice_vsi *vsi)
5969 {
5970 	int i, tx_err, rx_err, link_err = 0;
5971 
5972 	/* Caller of this function is expected to set the
5973 	 * vsi->state ICE_DOWN bit
5974 	 */
5975 	if (vsi->netdev) {
5976 		netif_carrier_off(vsi->netdev);
5977 		netif_tx_disable(vsi->netdev);
5978 	}
5979 
5980 	ice_vsi_dis_irq(vsi);
5981 
5982 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
5983 	if (tx_err)
5984 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
5985 			   vsi->vsi_num, tx_err);
5986 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
5987 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
5988 		if (tx_err)
5989 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
5990 				   vsi->vsi_num, tx_err);
5991 	}
5992 
5993 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
5994 	if (rx_err)
5995 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
5996 			   vsi->vsi_num, rx_err);
5997 
5998 	ice_napi_disable_all(vsi);
5999 
6000 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6001 		link_err = ice_force_phys_link_state(vsi, false);
6002 		if (link_err)
6003 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6004 				   vsi->vsi_num, link_err);
6005 	}
6006 
6007 	ice_for_each_txq(vsi, i)
6008 		ice_clean_tx_ring(vsi->tx_rings[i]);
6009 
6010 	ice_for_each_rxq(vsi, i)
6011 		ice_clean_rx_ring(vsi->rx_rings[i]);
6012 
6013 	if (tx_err || rx_err || link_err) {
6014 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6015 			   vsi->vsi_num, vsi->vsw->sw_id);
6016 		return -EIO;
6017 	}
6018 
6019 	return 0;
6020 }
6021 
6022 /**
6023  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6024  * @vsi: VSI having resources allocated
6025  *
6026  * Return 0 on success, negative on failure
6027  */
6028 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6029 {
6030 	int i, err = 0;
6031 
6032 	if (!vsi->num_txq) {
6033 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6034 			vsi->vsi_num);
6035 		return -EINVAL;
6036 	}
6037 
6038 	ice_for_each_txq(vsi, i) {
6039 		struct ice_ring *ring = vsi->tx_rings[i];
6040 
6041 		if (!ring)
6042 			return -EINVAL;
6043 
6044 		ring->netdev = vsi->netdev;
6045 		err = ice_setup_tx_ring(ring);
6046 		if (err)
6047 			break;
6048 	}
6049 
6050 	return err;
6051 }
6052 
6053 /**
6054  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6055  * @vsi: VSI having resources allocated
6056  *
6057  * Return 0 on success, negative on failure
6058  */
6059 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6060 {
6061 	int i, err = 0;
6062 
6063 	if (!vsi->num_rxq) {
6064 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6065 			vsi->vsi_num);
6066 		return -EINVAL;
6067 	}
6068 
6069 	ice_for_each_rxq(vsi, i) {
6070 		struct ice_ring *ring = vsi->rx_rings[i];
6071 
6072 		if (!ring)
6073 			return -EINVAL;
6074 
6075 		ring->netdev = vsi->netdev;
6076 		err = ice_setup_rx_ring(ring);
6077 		if (err)
6078 			break;
6079 	}
6080 
6081 	return err;
6082 }
6083 
6084 /**
6085  * ice_vsi_open_ctrl - open control VSI for use
6086  * @vsi: the VSI to open
6087  *
6088  * Initialization of the Control VSI
6089  *
6090  * Returns 0 on success, negative value on error
6091  */
6092 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6093 {
6094 	char int_name[ICE_INT_NAME_STR_LEN];
6095 	struct ice_pf *pf = vsi->back;
6096 	struct device *dev;
6097 	int err;
6098 
6099 	dev = ice_pf_to_dev(pf);
6100 	/* allocate descriptors */
6101 	err = ice_vsi_setup_tx_rings(vsi);
6102 	if (err)
6103 		goto err_setup_tx;
6104 
6105 	err = ice_vsi_setup_rx_rings(vsi);
6106 	if (err)
6107 		goto err_setup_rx;
6108 
6109 	err = ice_vsi_cfg(vsi);
6110 	if (err)
6111 		goto err_setup_rx;
6112 
6113 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6114 		 dev_driver_string(dev), dev_name(dev));
6115 	err = ice_vsi_req_irq_msix(vsi, int_name);
6116 	if (err)
6117 		goto err_setup_rx;
6118 
6119 	ice_vsi_cfg_msix(vsi);
6120 
6121 	err = ice_vsi_start_all_rx_rings(vsi);
6122 	if (err)
6123 		goto err_up_complete;
6124 
6125 	clear_bit(ICE_VSI_DOWN, vsi->state);
6126 	ice_vsi_ena_irq(vsi);
6127 
6128 	return 0;
6129 
6130 err_up_complete:
6131 	ice_down(vsi);
6132 err_setup_rx:
6133 	ice_vsi_free_rx_rings(vsi);
6134 err_setup_tx:
6135 	ice_vsi_free_tx_rings(vsi);
6136 
6137 	return err;
6138 }
6139 
6140 /**
6141  * ice_vsi_open - Called when a network interface is made active
6142  * @vsi: the VSI to open
6143  *
6144  * Initialization of the VSI
6145  *
6146  * Returns 0 on success, negative value on error
6147  */
6148 static int ice_vsi_open(struct ice_vsi *vsi)
6149 {
6150 	char int_name[ICE_INT_NAME_STR_LEN];
6151 	struct ice_pf *pf = vsi->back;
6152 	int err;
6153 
6154 	/* allocate descriptors */
6155 	err = ice_vsi_setup_tx_rings(vsi);
6156 	if (err)
6157 		goto err_setup_tx;
6158 
6159 	err = ice_vsi_setup_rx_rings(vsi);
6160 	if (err)
6161 		goto err_setup_rx;
6162 
6163 	err = ice_vsi_cfg(vsi);
6164 	if (err)
6165 		goto err_setup_rx;
6166 
6167 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6168 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6169 	err = ice_vsi_req_irq_msix(vsi, int_name);
6170 	if (err)
6171 		goto err_setup_rx;
6172 
6173 	/* Notify the stack of the actual queue counts. */
6174 	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6175 	if (err)
6176 		goto err_set_qs;
6177 
6178 	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6179 	if (err)
6180 		goto err_set_qs;
6181 
6182 	err = ice_up_complete(vsi);
6183 	if (err)
6184 		goto err_up_complete;
6185 
6186 	return 0;
6187 
6188 err_up_complete:
6189 	ice_down(vsi);
6190 err_set_qs:
6191 	ice_vsi_free_irq(vsi);
6192 err_setup_rx:
6193 	ice_vsi_free_rx_rings(vsi);
6194 err_setup_tx:
6195 	ice_vsi_free_tx_rings(vsi);
6196 
6197 	return err;
6198 }
6199 
6200 /**
6201  * ice_vsi_release_all - Delete all VSIs
6202  * @pf: PF from which all VSIs are being removed
6203  */
6204 static void ice_vsi_release_all(struct ice_pf *pf)
6205 {
6206 	int err, i;
6207 
6208 	if (!pf->vsi)
6209 		return;
6210 
6211 	ice_for_each_vsi(pf, i) {
6212 		if (!pf->vsi[i])
6213 			continue;
6214 
6215 		err = ice_vsi_release(pf->vsi[i]);
6216 		if (err)
6217 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6218 				i, err, pf->vsi[i]->vsi_num);
6219 	}
6220 }
6221 
6222 /**
6223  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6224  * @pf: pointer to the PF instance
6225  * @type: VSI type to rebuild
6226  *
6227  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6228  */
6229 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6230 {
6231 	struct device *dev = ice_pf_to_dev(pf);
6232 	enum ice_status status;
6233 	int i, err;
6234 
6235 	ice_for_each_vsi(pf, i) {
6236 		struct ice_vsi *vsi = pf->vsi[i];
6237 
6238 		if (!vsi || vsi->type != type)
6239 			continue;
6240 
6241 		/* rebuild the VSI */
6242 		err = ice_vsi_rebuild(vsi, true);
6243 		if (err) {
6244 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6245 				err, vsi->idx, ice_vsi_type_str(type));
6246 			return err;
6247 		}
6248 
6249 		/* replay filters for the VSI */
6250 		status = ice_replay_vsi(&pf->hw, vsi->idx);
6251 		if (status) {
6252 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6253 				ice_stat_str(status), vsi->idx,
6254 				ice_vsi_type_str(type));
6255 			return -EIO;
6256 		}
6257 
6258 		/* Re-map HW VSI number, using VSI handle that has been
6259 		 * previously validated in ice_replay_vsi() call above
6260 		 */
6261 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6262 
6263 		/* enable the VSI */
6264 		err = ice_ena_vsi(vsi, false);
6265 		if (err) {
6266 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6267 				err, vsi->idx, ice_vsi_type_str(type));
6268 			return err;
6269 		}
6270 
6271 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6272 			 ice_vsi_type_str(type));
6273 	}
6274 
6275 	return 0;
6276 }
6277 
6278 /**
6279  * ice_update_pf_netdev_link - Update PF netdev link status
6280  * @pf: pointer to the PF instance
6281  */
6282 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6283 {
6284 	bool link_up;
6285 	int i;
6286 
6287 	ice_for_each_vsi(pf, i) {
6288 		struct ice_vsi *vsi = pf->vsi[i];
6289 
6290 		if (!vsi || vsi->type != ICE_VSI_PF)
6291 			return;
6292 
6293 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6294 		if (link_up) {
6295 			netif_carrier_on(pf->vsi[i]->netdev);
6296 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6297 		} else {
6298 			netif_carrier_off(pf->vsi[i]->netdev);
6299 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6300 		}
6301 	}
6302 }
6303 
6304 /**
6305  * ice_rebuild - rebuild after reset
6306  * @pf: PF to rebuild
6307  * @reset_type: type of reset
6308  *
6309  * Do not rebuild VF VSI in this flow because that is already handled via
6310  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6311  * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
6312  * to reset/rebuild all the VF VSI twice.
6313  */
6314 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6315 {
6316 	struct device *dev = ice_pf_to_dev(pf);
6317 	struct ice_hw *hw = &pf->hw;
6318 	enum ice_status ret;
6319 	int err;
6320 
6321 	if (test_bit(ICE_DOWN, pf->state))
6322 		goto clear_recovery;
6323 
6324 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6325 
6326 	ret = ice_init_all_ctrlq(hw);
6327 	if (ret) {
6328 		dev_err(dev, "control queues init failed %s\n",
6329 			ice_stat_str(ret));
6330 		goto err_init_ctrlq;
6331 	}
6332 
6333 	/* if DDP was previously loaded successfully */
6334 	if (!ice_is_safe_mode(pf)) {
6335 		/* reload the SW DB of filter tables */
6336 		if (reset_type == ICE_RESET_PFR)
6337 			ice_fill_blk_tbls(hw);
6338 		else
6339 			/* Reload DDP Package after CORER/GLOBR reset */
6340 			ice_load_pkg(NULL, pf);
6341 	}
6342 
6343 	ret = ice_clear_pf_cfg(hw);
6344 	if (ret) {
6345 		dev_err(dev, "clear PF configuration failed %s\n",
6346 			ice_stat_str(ret));
6347 		goto err_init_ctrlq;
6348 	}
6349 
6350 	if (pf->first_sw->dflt_vsi_ena)
6351 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6352 	/* clear the default VSI configuration if it exists */
6353 	pf->first_sw->dflt_vsi = NULL;
6354 	pf->first_sw->dflt_vsi_ena = false;
6355 
6356 	ice_clear_pxe_mode(hw);
6357 
6358 	ret = ice_init_nvm(hw);
6359 	if (ret) {
6360 		dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
6361 		goto err_init_ctrlq;
6362 	}
6363 
6364 	ret = ice_get_caps(hw);
6365 	if (ret) {
6366 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6367 		goto err_init_ctrlq;
6368 	}
6369 
6370 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6371 	if (ret) {
6372 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6373 		goto err_init_ctrlq;
6374 	}
6375 
6376 	err = ice_sched_init_port(hw->port_info);
6377 	if (err)
6378 		goto err_sched_init_port;
6379 
6380 	/* start misc vector */
6381 	err = ice_req_irq_msix_misc(pf);
6382 	if (err) {
6383 		dev_err(dev, "misc vector setup failed: %d\n", err);
6384 		goto err_sched_init_port;
6385 	}
6386 
6387 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6388 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6389 		if (!rd32(hw, PFQF_FD_SIZE)) {
6390 			u16 unused, guar, b_effort;
6391 
6392 			guar = hw->func_caps.fd_fltr_guar;
6393 			b_effort = hw->func_caps.fd_fltr_best_effort;
6394 
6395 			/* force guaranteed filter pool for PF */
6396 			ice_alloc_fd_guar_item(hw, &unused, guar);
6397 			/* force shared filter pool for PF */
6398 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6399 		}
6400 	}
6401 
6402 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6403 		ice_dcb_rebuild(pf);
6404 
6405 	/* If the PF previously had enabled PTP, PTP init needs to happen before
6406 	 * the VSI rebuild. If not, this causes the PTP link status events to
6407 	 * fail.
6408 	 */
6409 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6410 		ice_ptp_init(pf);
6411 
6412 	/* rebuild PF VSI */
6413 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6414 	if (err) {
6415 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6416 		goto err_vsi_rebuild;
6417 	}
6418 
6419 	/* If Flow Director is active */
6420 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6421 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6422 		if (err) {
6423 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6424 			goto err_vsi_rebuild;
6425 		}
6426 
6427 		/* replay HW Flow Director recipes */
6428 		if (hw->fdir_prof)
6429 			ice_fdir_replay_flows(hw);
6430 
6431 		/* replay Flow Director filters */
6432 		ice_fdir_replay_fltrs(pf);
6433 
6434 		ice_rebuild_arfs(pf);
6435 	}
6436 
6437 	ice_update_pf_netdev_link(pf);
6438 
6439 	/* tell the firmware we are up */
6440 	ret = ice_send_version(pf);
6441 	if (ret) {
6442 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6443 			ice_stat_str(ret));
6444 		goto err_vsi_rebuild;
6445 	}
6446 
6447 	ice_replay_post(hw);
6448 
6449 	/* if we get here, reset flow is successful */
6450 	clear_bit(ICE_RESET_FAILED, pf->state);
6451 
6452 	ice_plug_aux_dev(pf);
6453 	return;
6454 
6455 err_vsi_rebuild:
6456 err_sched_init_port:
6457 	ice_sched_cleanup_all(hw);
6458 err_init_ctrlq:
6459 	ice_shutdown_all_ctrlq(hw);
6460 	set_bit(ICE_RESET_FAILED, pf->state);
6461 clear_recovery:
6462 	/* set this bit in PF state to control service task scheduling */
6463 	set_bit(ICE_NEEDS_RESTART, pf->state);
6464 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6465 }
6466 
6467 /**
6468  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6469  * @vsi: Pointer to VSI structure
6470  */
6471 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6472 {
6473 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6474 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6475 	else
6476 		return ICE_RXBUF_3072;
6477 }
6478 
6479 /**
6480  * ice_change_mtu - NDO callback to change the MTU
6481  * @netdev: network interface device structure
6482  * @new_mtu: new value for maximum frame size
6483  *
6484  * Returns 0 on success, negative on failure
6485  */
6486 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6487 {
6488 	struct ice_netdev_priv *np = netdev_priv(netdev);
6489 	struct ice_vsi *vsi = np->vsi;
6490 	struct ice_pf *pf = vsi->back;
6491 	struct iidc_event *event;
6492 	u8 count = 0;
6493 	int err = 0;
6494 
6495 	if (new_mtu == (int)netdev->mtu) {
6496 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6497 		return 0;
6498 	}
6499 
6500 	if (ice_is_xdp_ena_vsi(vsi)) {
6501 		int frame_size = ice_max_xdp_frame_size(vsi);
6502 
6503 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6504 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6505 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6506 			return -EINVAL;
6507 		}
6508 	}
6509 
6510 	/* if a reset is in progress, wait for some time for it to complete */
6511 	do {
6512 		if (ice_is_reset_in_progress(pf->state)) {
6513 			count++;
6514 			usleep_range(1000, 2000);
6515 		} else {
6516 			break;
6517 		}
6518 
6519 	} while (count < 100);
6520 
6521 	if (count == 100) {
6522 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6523 		return -EBUSY;
6524 	}
6525 
6526 	event = kzalloc(sizeof(*event), GFP_KERNEL);
6527 	if (!event)
6528 		return -ENOMEM;
6529 
6530 	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6531 	ice_send_event_to_aux(pf, event);
6532 	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6533 
6534 	netdev->mtu = (unsigned int)new_mtu;
6535 
6536 	/* if VSI is up, bring it down and then back up */
6537 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6538 		err = ice_down(vsi);
6539 		if (err) {
6540 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6541 			goto event_after;
6542 		}
6543 
6544 		err = ice_up(vsi);
6545 		if (err) {
6546 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6547 			goto event_after;
6548 		}
6549 	}
6550 
6551 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6552 event_after:
6553 	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6554 	ice_send_event_to_aux(pf, event);
6555 	kfree(event);
6556 
6557 	return err;
6558 }
6559 
6560 /**
6561  * ice_do_ioctl - Access the hwtstamp interface
6562  * @netdev: network interface device structure
6563  * @ifr: interface request data
6564  * @cmd: ioctl command
6565  */
6566 static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6567 {
6568 	struct ice_netdev_priv *np = netdev_priv(netdev);
6569 	struct ice_pf *pf = np->vsi->back;
6570 
6571 	switch (cmd) {
6572 	case SIOCGHWTSTAMP:
6573 		return ice_ptp_get_ts_config(pf, ifr);
6574 	case SIOCSHWTSTAMP:
6575 		return ice_ptp_set_ts_config(pf, ifr);
6576 	default:
6577 		return -EOPNOTSUPP;
6578 	}
6579 }
6580 
6581 /**
6582  * ice_aq_str - convert AQ err code to a string
6583  * @aq_err: the AQ error code to convert
6584  */
6585 const char *ice_aq_str(enum ice_aq_err aq_err)
6586 {
6587 	switch (aq_err) {
6588 	case ICE_AQ_RC_OK:
6589 		return "OK";
6590 	case ICE_AQ_RC_EPERM:
6591 		return "ICE_AQ_RC_EPERM";
6592 	case ICE_AQ_RC_ENOENT:
6593 		return "ICE_AQ_RC_ENOENT";
6594 	case ICE_AQ_RC_ENOMEM:
6595 		return "ICE_AQ_RC_ENOMEM";
6596 	case ICE_AQ_RC_EBUSY:
6597 		return "ICE_AQ_RC_EBUSY";
6598 	case ICE_AQ_RC_EEXIST:
6599 		return "ICE_AQ_RC_EEXIST";
6600 	case ICE_AQ_RC_EINVAL:
6601 		return "ICE_AQ_RC_EINVAL";
6602 	case ICE_AQ_RC_ENOSPC:
6603 		return "ICE_AQ_RC_ENOSPC";
6604 	case ICE_AQ_RC_ENOSYS:
6605 		return "ICE_AQ_RC_ENOSYS";
6606 	case ICE_AQ_RC_EMODE:
6607 		return "ICE_AQ_RC_EMODE";
6608 	case ICE_AQ_RC_ENOSEC:
6609 		return "ICE_AQ_RC_ENOSEC";
6610 	case ICE_AQ_RC_EBADSIG:
6611 		return "ICE_AQ_RC_EBADSIG";
6612 	case ICE_AQ_RC_ESVN:
6613 		return "ICE_AQ_RC_ESVN";
6614 	case ICE_AQ_RC_EBADMAN:
6615 		return "ICE_AQ_RC_EBADMAN";
6616 	case ICE_AQ_RC_EBADBUF:
6617 		return "ICE_AQ_RC_EBADBUF";
6618 	}
6619 
6620 	return "ICE_AQ_RC_UNKNOWN";
6621 }
6622 
6623 /**
6624  * ice_stat_str - convert status err code to a string
6625  * @stat_err: the status error code to convert
6626  */
6627 const char *ice_stat_str(enum ice_status stat_err)
6628 {
6629 	switch (stat_err) {
6630 	case ICE_SUCCESS:
6631 		return "OK";
6632 	case ICE_ERR_PARAM:
6633 		return "ICE_ERR_PARAM";
6634 	case ICE_ERR_NOT_IMPL:
6635 		return "ICE_ERR_NOT_IMPL";
6636 	case ICE_ERR_NOT_READY:
6637 		return "ICE_ERR_NOT_READY";
6638 	case ICE_ERR_NOT_SUPPORTED:
6639 		return "ICE_ERR_NOT_SUPPORTED";
6640 	case ICE_ERR_BAD_PTR:
6641 		return "ICE_ERR_BAD_PTR";
6642 	case ICE_ERR_INVAL_SIZE:
6643 		return "ICE_ERR_INVAL_SIZE";
6644 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6645 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6646 	case ICE_ERR_RESET_FAILED:
6647 		return "ICE_ERR_RESET_FAILED";
6648 	case ICE_ERR_FW_API_VER:
6649 		return "ICE_ERR_FW_API_VER";
6650 	case ICE_ERR_NO_MEMORY:
6651 		return "ICE_ERR_NO_MEMORY";
6652 	case ICE_ERR_CFG:
6653 		return "ICE_ERR_CFG";
6654 	case ICE_ERR_OUT_OF_RANGE:
6655 		return "ICE_ERR_OUT_OF_RANGE";
6656 	case ICE_ERR_ALREADY_EXISTS:
6657 		return "ICE_ERR_ALREADY_EXISTS";
6658 	case ICE_ERR_NVM:
6659 		return "ICE_ERR_NVM";
6660 	case ICE_ERR_NVM_CHECKSUM:
6661 		return "ICE_ERR_NVM_CHECKSUM";
6662 	case ICE_ERR_BUF_TOO_SHORT:
6663 		return "ICE_ERR_BUF_TOO_SHORT";
6664 	case ICE_ERR_NVM_BLANK_MODE:
6665 		return "ICE_ERR_NVM_BLANK_MODE";
6666 	case ICE_ERR_IN_USE:
6667 		return "ICE_ERR_IN_USE";
6668 	case ICE_ERR_MAX_LIMIT:
6669 		return "ICE_ERR_MAX_LIMIT";
6670 	case ICE_ERR_RESET_ONGOING:
6671 		return "ICE_ERR_RESET_ONGOING";
6672 	case ICE_ERR_HW_TABLE:
6673 		return "ICE_ERR_HW_TABLE";
6674 	case ICE_ERR_DOES_NOT_EXIST:
6675 		return "ICE_ERR_DOES_NOT_EXIST";
6676 	case ICE_ERR_FW_DDP_MISMATCH:
6677 		return "ICE_ERR_FW_DDP_MISMATCH";
6678 	case ICE_ERR_AQ_ERROR:
6679 		return "ICE_ERR_AQ_ERROR";
6680 	case ICE_ERR_AQ_TIMEOUT:
6681 		return "ICE_ERR_AQ_TIMEOUT";
6682 	case ICE_ERR_AQ_FULL:
6683 		return "ICE_ERR_AQ_FULL";
6684 	case ICE_ERR_AQ_NO_WORK:
6685 		return "ICE_ERR_AQ_NO_WORK";
6686 	case ICE_ERR_AQ_EMPTY:
6687 		return "ICE_ERR_AQ_EMPTY";
6688 	case ICE_ERR_AQ_FW_CRITICAL:
6689 		return "ICE_ERR_AQ_FW_CRITICAL";
6690 	}
6691 
6692 	return "ICE_ERR_UNKNOWN";
6693 }
6694 
6695 /**
6696  * ice_set_rss_lut - Set RSS LUT
6697  * @vsi: Pointer to VSI structure
6698  * @lut: Lookup table
6699  * @lut_size: Lookup table size
6700  *
6701  * Returns 0 on success, negative on failure
6702  */
6703 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6704 {
6705 	struct ice_aq_get_set_rss_lut_params params = {};
6706 	struct ice_hw *hw = &vsi->back->hw;
6707 	enum ice_status status;
6708 
6709 	if (!lut)
6710 		return -EINVAL;
6711 
6712 	params.vsi_handle = vsi->idx;
6713 	params.lut_size = lut_size;
6714 	params.lut_type = vsi->rss_lut_type;
6715 	params.lut = lut;
6716 
6717 	status = ice_aq_set_rss_lut(hw, &params);
6718 	if (status) {
6719 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6720 			ice_stat_str(status),
6721 			ice_aq_str(hw->adminq.sq_last_status));
6722 		return -EIO;
6723 	}
6724 
6725 	return 0;
6726 }
6727 
6728 /**
6729  * ice_set_rss_key - Set RSS key
6730  * @vsi: Pointer to the VSI structure
6731  * @seed: RSS hash seed
6732  *
6733  * Returns 0 on success, negative on failure
6734  */
6735 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6736 {
6737 	struct ice_hw *hw = &vsi->back->hw;
6738 	enum ice_status status;
6739 
6740 	if (!seed)
6741 		return -EINVAL;
6742 
6743 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6744 	if (status) {
6745 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6746 			ice_stat_str(status),
6747 			ice_aq_str(hw->adminq.sq_last_status));
6748 		return -EIO;
6749 	}
6750 
6751 	return 0;
6752 }
6753 
6754 /**
6755  * ice_get_rss_lut - Get RSS LUT
6756  * @vsi: Pointer to VSI structure
6757  * @lut: Buffer to store the lookup table entries
6758  * @lut_size: Size of buffer to store the lookup table entries
6759  *
6760  * Returns 0 on success, negative on failure
6761  */
6762 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6763 {
6764 	struct ice_aq_get_set_rss_lut_params params = {};
6765 	struct ice_hw *hw = &vsi->back->hw;
6766 	enum ice_status status;
6767 
6768 	if (!lut)
6769 		return -EINVAL;
6770 
6771 	params.vsi_handle = vsi->idx;
6772 	params.lut_size = lut_size;
6773 	params.lut_type = vsi->rss_lut_type;
6774 	params.lut = lut;
6775 
6776 	status = ice_aq_get_rss_lut(hw, &params);
6777 	if (status) {
6778 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
6779 			ice_stat_str(status),
6780 			ice_aq_str(hw->adminq.sq_last_status));
6781 		return -EIO;
6782 	}
6783 
6784 	return 0;
6785 }
6786 
6787 /**
6788  * ice_get_rss_key - Get RSS key
6789  * @vsi: Pointer to VSI structure
6790  * @seed: Buffer to store the key in
6791  *
6792  * Returns 0 on success, negative on failure
6793  */
6794 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
6795 {
6796 	struct ice_hw *hw = &vsi->back->hw;
6797 	enum ice_status status;
6798 
6799 	if (!seed)
6800 		return -EINVAL;
6801 
6802 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6803 	if (status) {
6804 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
6805 			ice_stat_str(status),
6806 			ice_aq_str(hw->adminq.sq_last_status));
6807 		return -EIO;
6808 	}
6809 
6810 	return 0;
6811 }
6812 
6813 /**
6814  * ice_bridge_getlink - Get the hardware bridge mode
6815  * @skb: skb buff
6816  * @pid: process ID
6817  * @seq: RTNL message seq
6818  * @dev: the netdev being configured
6819  * @filter_mask: filter mask passed in
6820  * @nlflags: netlink flags passed in
6821  *
6822  * Return the bridge mode (VEB/VEPA)
6823  */
6824 static int
6825 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6826 		   struct net_device *dev, u32 filter_mask, int nlflags)
6827 {
6828 	struct ice_netdev_priv *np = netdev_priv(dev);
6829 	struct ice_vsi *vsi = np->vsi;
6830 	struct ice_pf *pf = vsi->back;
6831 	u16 bmode;
6832 
6833 	bmode = pf->first_sw->bridge_mode;
6834 
6835 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6836 				       filter_mask, NULL);
6837 }
6838 
6839 /**
6840  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6841  * @vsi: Pointer to VSI structure
6842  * @bmode: Hardware bridge mode (VEB/VEPA)
6843  *
6844  * Returns 0 on success, negative on failure
6845  */
6846 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6847 {
6848 	struct ice_aqc_vsi_props *vsi_props;
6849 	struct ice_hw *hw = &vsi->back->hw;
6850 	struct ice_vsi_ctx *ctxt;
6851 	enum ice_status status;
6852 	int ret = 0;
6853 
6854 	vsi_props = &vsi->info;
6855 
6856 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6857 	if (!ctxt)
6858 		return -ENOMEM;
6859 
6860 	ctxt->info = vsi->info;
6861 
6862 	if (bmode == BRIDGE_MODE_VEB)
6863 		/* change from VEPA to VEB mode */
6864 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6865 	else
6866 		/* change from VEB to VEPA mode */
6867 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6868 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6869 
6870 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6871 	if (status) {
6872 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6873 			bmode, ice_stat_str(status),
6874 			ice_aq_str(hw->adminq.sq_last_status));
6875 		ret = -EIO;
6876 		goto out;
6877 	}
6878 	/* Update sw flags for book keeping */
6879 	vsi_props->sw_flags = ctxt->info.sw_flags;
6880 
6881 out:
6882 	kfree(ctxt);
6883 	return ret;
6884 }
6885 
6886 /**
6887  * ice_bridge_setlink - Set the hardware bridge mode
6888  * @dev: the netdev being configured
6889  * @nlh: RTNL message
6890  * @flags: bridge setlink flags
6891  * @extack: netlink extended ack
6892  *
6893  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6894  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
6895  * not already set for all VSIs connected to this switch. And also update the
6896  * unicast switch filter rules for the corresponding switch of the netdev.
6897  */
6898 static int
6899 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6900 		   u16 __always_unused flags,
6901 		   struct netlink_ext_ack __always_unused *extack)
6902 {
6903 	struct ice_netdev_priv *np = netdev_priv(dev);
6904 	struct ice_pf *pf = np->vsi->back;
6905 	struct nlattr *attr, *br_spec;
6906 	struct ice_hw *hw = &pf->hw;
6907 	enum ice_status status;
6908 	struct ice_sw *pf_sw;
6909 	int rem, v, err = 0;
6910 
6911 	pf_sw = pf->first_sw;
6912 	/* find the attribute in the netlink message */
6913 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
6914 
6915 	nla_for_each_nested(attr, br_spec, rem) {
6916 		__u16 mode;
6917 
6918 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6919 			continue;
6920 		mode = nla_get_u16(attr);
6921 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6922 			return -EINVAL;
6923 		/* Continue  if bridge mode is not being flipped */
6924 		if (mode == pf_sw->bridge_mode)
6925 			continue;
6926 		/* Iterates through the PF VSI list and update the loopback
6927 		 * mode of the VSI
6928 		 */
6929 		ice_for_each_vsi(pf, v) {
6930 			if (!pf->vsi[v])
6931 				continue;
6932 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6933 			if (err)
6934 				return err;
6935 		}
6936 
6937 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6938 		/* Update the unicast switch filter rules for the corresponding
6939 		 * switch of the netdev
6940 		 */
6941 		status = ice_update_sw_rule_bridge_mode(hw);
6942 		if (status) {
6943 			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6944 				   mode, ice_stat_str(status),
6945 				   ice_aq_str(hw->adminq.sq_last_status));
6946 			/* revert hw->evb_veb */
6947 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6948 			return -EIO;
6949 		}
6950 
6951 		pf_sw->bridge_mode = mode;
6952 	}
6953 
6954 	return 0;
6955 }
6956 
6957 /**
6958  * ice_tx_timeout - Respond to a Tx Hang
6959  * @netdev: network interface device structure
6960  * @txqueue: Tx queue
6961  */
6962 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6963 {
6964 	struct ice_netdev_priv *np = netdev_priv(netdev);
6965 	struct ice_ring *tx_ring = NULL;
6966 	struct ice_vsi *vsi = np->vsi;
6967 	struct ice_pf *pf = vsi->back;
6968 	u32 i;
6969 
6970 	pf->tx_timeout_count++;
6971 
6972 	/* Check if PFC is enabled for the TC to which the queue belongs
6973 	 * to. If yes then Tx timeout is not caused by a hung queue, no
6974 	 * need to reset and rebuild
6975 	 */
6976 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6977 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6978 			 txqueue);
6979 		return;
6980 	}
6981 
6982 	/* now that we have an index, find the tx_ring struct */
6983 	for (i = 0; i < vsi->num_txq; i++)
6984 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
6985 			if (txqueue == vsi->tx_rings[i]->q_index) {
6986 				tx_ring = vsi->tx_rings[i];
6987 				break;
6988 			}
6989 
6990 	/* Reset recovery level if enough time has elapsed after last timeout.
6991 	 * Also ensure no new reset action happens before next timeout period.
6992 	 */
6993 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
6994 		pf->tx_timeout_recovery_level = 1;
6995 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
6996 				       netdev->watchdog_timeo)))
6997 		return;
6998 
6999 	if (tx_ring) {
7000 		struct ice_hw *hw = &pf->hw;
7001 		u32 head, val = 0;
7002 
7003 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7004 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7005 		/* Read interrupt register */
7006 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7007 
7008 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7009 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7010 			    head, tx_ring->next_to_use, val);
7011 	}
7012 
7013 	pf->tx_timeout_last_recovery = jiffies;
7014 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7015 		    pf->tx_timeout_recovery_level, txqueue);
7016 
7017 	switch (pf->tx_timeout_recovery_level) {
7018 	case 1:
7019 		set_bit(ICE_PFR_REQ, pf->state);
7020 		break;
7021 	case 2:
7022 		set_bit(ICE_CORER_REQ, pf->state);
7023 		break;
7024 	case 3:
7025 		set_bit(ICE_GLOBR_REQ, pf->state);
7026 		break;
7027 	default:
7028 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7029 		set_bit(ICE_DOWN, pf->state);
7030 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7031 		set_bit(ICE_SERVICE_DIS, pf->state);
7032 		break;
7033 	}
7034 
7035 	ice_service_task_schedule(pf);
7036 	pf->tx_timeout_recovery_level++;
7037 }
7038 
7039 /**
7040  * ice_open - Called when a network interface becomes active
7041  * @netdev: network interface device structure
7042  *
7043  * The open entry point is called when a network interface is made
7044  * active by the system (IFF_UP). At this point all resources needed
7045  * for transmit and receive operations are allocated, the interrupt
7046  * handler is registered with the OS, the netdev watchdog is enabled,
7047  * and the stack is notified that the interface is ready.
7048  *
7049  * Returns 0 on success, negative value on failure
7050  */
7051 int ice_open(struct net_device *netdev)
7052 {
7053 	struct ice_netdev_priv *np = netdev_priv(netdev);
7054 	struct ice_pf *pf = np->vsi->back;
7055 
7056 	if (ice_is_reset_in_progress(pf->state)) {
7057 		netdev_err(netdev, "can't open net device while reset is in progress");
7058 		return -EBUSY;
7059 	}
7060 
7061 	return ice_open_internal(netdev);
7062 }
7063 
7064 /**
7065  * ice_open_internal - Called when a network interface becomes active
7066  * @netdev: network interface device structure
7067  *
7068  * Internal ice_open implementation. Should not be used directly except for ice_open and reset
7069  * handling routine
7070  *
7071  * Returns 0 on success, negative value on failure
7072  */
7073 int ice_open_internal(struct net_device *netdev)
7074 {
7075 	struct ice_netdev_priv *np = netdev_priv(netdev);
7076 	struct ice_vsi *vsi = np->vsi;
7077 	struct ice_pf *pf = vsi->back;
7078 	struct ice_port_info *pi;
7079 	enum ice_status status;
7080 	int err;
7081 
7082 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
7083 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
7084 		return -EIO;
7085 	}
7086 
7087 	netif_carrier_off(netdev);
7088 
7089 	pi = vsi->port_info;
7090 	status = ice_update_link_info(pi);
7091 	if (status) {
7092 		netdev_err(netdev, "Failed to get link info, error %s\n",
7093 			   ice_stat_str(status));
7094 		return -EIO;
7095 	}
7096 
7097 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
7098 
7099 	/* Set PHY if there is media, otherwise, turn off PHY */
7100 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
7101 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7102 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
7103 			err = ice_init_phy_user_cfg(pi);
7104 			if (err) {
7105 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
7106 					   err);
7107 				return err;
7108 			}
7109 		}
7110 
7111 		err = ice_configure_phy(vsi);
7112 		if (err) {
7113 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
7114 				   err);
7115 			return err;
7116 		}
7117 	} else {
7118 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7119 		ice_set_link(vsi, false);
7120 	}
7121 
7122 	err = ice_vsi_open(vsi);
7123 	if (err)
7124 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
7125 			   vsi->vsi_num, vsi->vsw->sw_id);
7126 
7127 	/* Update existing tunnels information */
7128 	udp_tunnel_get_rx_info(netdev);
7129 
7130 	return err;
7131 }
7132 
7133 /**
7134  * ice_stop - Disables a network interface
7135  * @netdev: network interface device structure
7136  *
7137  * The stop entry point is called when an interface is de-activated by the OS,
7138  * and the netdevice enters the DOWN state. The hardware is still under the
7139  * driver's control, but the netdev interface is disabled.
7140  *
7141  * Returns success only - not allowed to fail
7142  */
7143 int ice_stop(struct net_device *netdev)
7144 {
7145 	struct ice_netdev_priv *np = netdev_priv(netdev);
7146 	struct ice_vsi *vsi = np->vsi;
7147 	struct ice_pf *pf = vsi->back;
7148 
7149 	if (ice_is_reset_in_progress(pf->state)) {
7150 		netdev_err(netdev, "can't stop net device while reset is in progress");
7151 		return -EBUSY;
7152 	}
7153 
7154 	ice_vsi_close(vsi);
7155 
7156 	return 0;
7157 }
7158 
7159 /**
7160  * ice_features_check - Validate encapsulated packet conforms to limits
7161  * @skb: skb buffer
7162  * @netdev: This port's netdev
7163  * @features: Offload features that the stack believes apply
7164  */
7165 static netdev_features_t
7166 ice_features_check(struct sk_buff *skb,
7167 		   struct net_device __always_unused *netdev,
7168 		   netdev_features_t features)
7169 {
7170 	size_t len;
7171 
7172 	/* No point in doing any of this if neither checksum nor GSO are
7173 	 * being requested for this frame. We can rule out both by just
7174 	 * checking for CHECKSUM_PARTIAL
7175 	 */
7176 	if (skb->ip_summed != CHECKSUM_PARTIAL)
7177 		return features;
7178 
7179 	/* We cannot support GSO if the MSS is going to be less than
7180 	 * 64 bytes. If it is then we need to drop support for GSO.
7181 	 */
7182 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
7183 		features &= ~NETIF_F_GSO_MASK;
7184 
7185 	len = skb_network_header(skb) - skb->data;
7186 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
7187 		goto out_rm_features;
7188 
7189 	len = skb_transport_header(skb) - skb_network_header(skb);
7190 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7191 		goto out_rm_features;
7192 
7193 	if (skb->encapsulation) {
7194 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
7195 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
7196 			goto out_rm_features;
7197 
7198 		len = skb_inner_transport_header(skb) -
7199 		      skb_inner_network_header(skb);
7200 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7201 			goto out_rm_features;
7202 	}
7203 
7204 	return features;
7205 out_rm_features:
7206 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
7207 }
7208 
7209 static const struct net_device_ops ice_netdev_safe_mode_ops = {
7210 	.ndo_open = ice_open,
7211 	.ndo_stop = ice_stop,
7212 	.ndo_start_xmit = ice_start_xmit,
7213 	.ndo_set_mac_address = ice_set_mac_address,
7214 	.ndo_validate_addr = eth_validate_addr,
7215 	.ndo_change_mtu = ice_change_mtu,
7216 	.ndo_get_stats64 = ice_get_stats64,
7217 	.ndo_tx_timeout = ice_tx_timeout,
7218 	.ndo_bpf = ice_xdp_safe_mode,
7219 };
7220 
7221 static const struct net_device_ops ice_netdev_ops = {
7222 	.ndo_open = ice_open,
7223 	.ndo_stop = ice_stop,
7224 	.ndo_start_xmit = ice_start_xmit,
7225 	.ndo_features_check = ice_features_check,
7226 	.ndo_set_rx_mode = ice_set_rx_mode,
7227 	.ndo_set_mac_address = ice_set_mac_address,
7228 	.ndo_validate_addr = eth_validate_addr,
7229 	.ndo_change_mtu = ice_change_mtu,
7230 	.ndo_get_stats64 = ice_get_stats64,
7231 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
7232 	.ndo_do_ioctl = ice_do_ioctl,
7233 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
7234 	.ndo_set_vf_mac = ice_set_vf_mac,
7235 	.ndo_get_vf_config = ice_get_vf_cfg,
7236 	.ndo_set_vf_trust = ice_set_vf_trust,
7237 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
7238 	.ndo_set_vf_link_state = ice_set_vf_link_state,
7239 	.ndo_get_vf_stats = ice_get_vf_stats,
7240 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
7241 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
7242 	.ndo_set_features = ice_set_features,
7243 	.ndo_bridge_getlink = ice_bridge_getlink,
7244 	.ndo_bridge_setlink = ice_bridge_setlink,
7245 	.ndo_fdb_add = ice_fdb_add,
7246 	.ndo_fdb_del = ice_fdb_del,
7247 #ifdef CONFIG_RFS_ACCEL
7248 	.ndo_rx_flow_steer = ice_rx_flow_steer,
7249 #endif
7250 	.ndo_tx_timeout = ice_tx_timeout,
7251 	.ndo_bpf = ice_xdp,
7252 	.ndo_xdp_xmit = ice_xdp_xmit,
7253 	.ndo_xsk_wakeup = ice_xsk_wakeup,
7254 };
7255