1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
17  * ice tracepoint functions. This must be done exactly once across the
18  * ice driver.
19  */
20 #define CREATE_TRACE_POINTS
21 #include "ice_trace.h"
22 #include "ice_eswitch.h"
23 #include "ice_tc_lib.h"
24 
25 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
26 static const char ice_driver_string[] = DRV_SUMMARY;
27 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
28 
29 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
30 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
31 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
32 
33 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
34 MODULE_DESCRIPTION(DRV_SUMMARY);
35 MODULE_LICENSE("GPL v2");
36 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
37 
38 static int debug = -1;
39 module_param(debug, int, 0644);
40 #ifndef CONFIG_DYNAMIC_DEBUG
41 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
42 #else
43 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
44 #endif /* !CONFIG_DYNAMIC_DEBUG */
45 
46 static DEFINE_IDA(ice_aux_ida);
47 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
48 EXPORT_SYMBOL(ice_xdp_locking_key);
49 
50 static struct workqueue_struct *ice_wq;
51 static const struct net_device_ops ice_netdev_safe_mode_ops;
52 static const struct net_device_ops ice_netdev_ops;
53 
54 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
55 
56 static void ice_vsi_release_all(struct ice_pf *pf);
57 
58 static int ice_rebuild_channels(struct ice_pf *pf);
59 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
60 
61 static int
62 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
63 		     void *cb_priv, enum tc_setup_type type, void *type_data,
64 		     void *data,
65 		     void (*cleanup)(struct flow_block_cb *block_cb));
66 
67 bool netif_is_ice(struct net_device *dev)
68 {
69 	return dev && (dev->netdev_ops == &ice_netdev_ops);
70 }
71 
72 /**
73  * ice_get_tx_pending - returns number of Tx descriptors not processed
74  * @ring: the ring of descriptors
75  */
76 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
77 {
78 	u16 head, tail;
79 
80 	head = ring->next_to_clean;
81 	tail = ring->next_to_use;
82 
83 	if (head != tail)
84 		return (head < tail) ?
85 			tail - head : (tail + ring->count - head);
86 	return 0;
87 }
88 
89 /**
90  * ice_check_for_hang_subtask - check for and recover hung queues
91  * @pf: pointer to PF struct
92  */
93 static void ice_check_for_hang_subtask(struct ice_pf *pf)
94 {
95 	struct ice_vsi *vsi = NULL;
96 	struct ice_hw *hw;
97 	unsigned int i;
98 	int packets;
99 	u32 v;
100 
101 	ice_for_each_vsi(pf, v)
102 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
103 			vsi = pf->vsi[v];
104 			break;
105 		}
106 
107 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
108 		return;
109 
110 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
111 		return;
112 
113 	hw = &vsi->back->hw;
114 
115 	ice_for_each_txq(vsi, i) {
116 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
117 
118 		if (!tx_ring)
119 			continue;
120 		if (ice_ring_ch_enabled(tx_ring))
121 			continue;
122 
123 		if (tx_ring->desc) {
124 			/* If packet counter has not changed the queue is
125 			 * likely stalled, so force an interrupt for this
126 			 * queue.
127 			 *
128 			 * prev_pkt would be negative if there was no
129 			 * pending work.
130 			 */
131 			packets = tx_ring->stats.pkts & INT_MAX;
132 			if (tx_ring->tx_stats.prev_pkt == packets) {
133 				/* Trigger sw interrupt to revive the queue */
134 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
135 				continue;
136 			}
137 
138 			/* Memory barrier between read of packet count and call
139 			 * to ice_get_tx_pending()
140 			 */
141 			smp_rmb();
142 			tx_ring->tx_stats.prev_pkt =
143 			    ice_get_tx_pending(tx_ring) ? packets : -1;
144 		}
145 	}
146 }
147 
148 /**
149  * ice_init_mac_fltr - Set initial MAC filters
150  * @pf: board private structure
151  *
152  * Set initial set of MAC filters for PF VSI; configure filters for permanent
153  * address and broadcast address. If an error is encountered, netdevice will be
154  * unregistered.
155  */
156 static int ice_init_mac_fltr(struct ice_pf *pf)
157 {
158 	struct ice_vsi *vsi;
159 	u8 *perm_addr;
160 
161 	vsi = ice_get_main_vsi(pf);
162 	if (!vsi)
163 		return -EINVAL;
164 
165 	perm_addr = vsi->port_info->mac.perm_addr;
166 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
167 }
168 
169 /**
170  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
171  * @netdev: the net device on which the sync is happening
172  * @addr: MAC address to sync
173  *
174  * This is a callback function which is called by the in kernel device sync
175  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
176  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
177  * MAC filters from the hardware.
178  */
179 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
180 {
181 	struct ice_netdev_priv *np = netdev_priv(netdev);
182 	struct ice_vsi *vsi = np->vsi;
183 
184 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
185 				     ICE_FWD_TO_VSI))
186 		return -EINVAL;
187 
188 	return 0;
189 }
190 
191 /**
192  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
193  * @netdev: the net device on which the unsync is happening
194  * @addr: MAC address to unsync
195  *
196  * This is a callback function which is called by the in kernel device unsync
197  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
198  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
199  * delete the MAC filters from the hardware.
200  */
201 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
202 {
203 	struct ice_netdev_priv *np = netdev_priv(netdev);
204 	struct ice_vsi *vsi = np->vsi;
205 
206 	/* Under some circumstances, we might receive a request to delete our
207 	 * own device address from our uc list. Because we store the device
208 	 * address in the VSI's MAC filter list, we need to ignore such
209 	 * requests and not delete our device address from this list.
210 	 */
211 	if (ether_addr_equal(addr, netdev->dev_addr))
212 		return 0;
213 
214 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
215 				     ICE_FWD_TO_VSI))
216 		return -EINVAL;
217 
218 	return 0;
219 }
220 
221 /**
222  * ice_vsi_fltr_changed - check if filter state changed
223  * @vsi: VSI to be checked
224  *
225  * returns true if filter state has changed, false otherwise.
226  */
227 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
228 {
229 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
230 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
231 	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
232 }
233 
234 /**
235  * ice_set_promisc - Enable promiscuous mode for a given PF
236  * @vsi: the VSI being configured
237  * @promisc_m: mask of promiscuous config bits
238  *
239  */
240 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
241 {
242 	int status;
243 
244 	if (vsi->type != ICE_VSI_PF)
245 		return 0;
246 
247 	if (vsi->num_vlan > 1)
248 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
249 	else
250 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
251 	return status;
252 }
253 
254 /**
255  * ice_clear_promisc - Disable promiscuous mode for a given PF
256  * @vsi: the VSI being configured
257  * @promisc_m: mask of promiscuous config bits
258  *
259  */
260 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
261 {
262 	int status;
263 
264 	if (vsi->type != ICE_VSI_PF)
265 		return 0;
266 
267 	if (vsi->num_vlan > 1)
268 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
269 	else
270 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
271 	return status;
272 }
273 
274 /**
275  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
276  * @vsi: ptr to the VSI
277  *
278  * Push any outstanding VSI filter changes through the AdminQ.
279  */
280 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
281 {
282 	struct device *dev = ice_pf_to_dev(vsi->back);
283 	struct net_device *netdev = vsi->netdev;
284 	bool promisc_forced_on = false;
285 	struct ice_pf *pf = vsi->back;
286 	struct ice_hw *hw = &pf->hw;
287 	u32 changed_flags = 0;
288 	u8 promisc_m;
289 	int err;
290 
291 	if (!vsi->netdev)
292 		return -EINVAL;
293 
294 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
295 		usleep_range(1000, 2000);
296 
297 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
298 	vsi->current_netdev_flags = vsi->netdev->flags;
299 
300 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
301 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
302 
303 	if (ice_vsi_fltr_changed(vsi)) {
304 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
305 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
306 		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
307 
308 		/* grab the netdev's addr_list_lock */
309 		netif_addr_lock_bh(netdev);
310 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
311 			      ice_add_mac_to_unsync_list);
312 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
313 			      ice_add_mac_to_unsync_list);
314 		/* our temp lists are populated. release lock */
315 		netif_addr_unlock_bh(netdev);
316 	}
317 
318 	/* Remove MAC addresses in the unsync list */
319 	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
320 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
321 	if (err) {
322 		netdev_err(netdev, "Failed to delete MAC filters\n");
323 		/* if we failed because of alloc failures, just bail */
324 		if (err == -ENOMEM)
325 			goto out;
326 	}
327 
328 	/* Add MAC addresses in the sync list */
329 	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
330 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
331 	/* If filter is added successfully or already exists, do not go into
332 	 * 'if' condition and report it as error. Instead continue processing
333 	 * rest of the function.
334 	 */
335 	if (err && err != -EEXIST) {
336 		netdev_err(netdev, "Failed to add MAC filters\n");
337 		/* If there is no more space for new umac filters, VSI
338 		 * should go into promiscuous mode. There should be some
339 		 * space reserved for promiscuous filters.
340 		 */
341 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
342 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
343 				      vsi->state)) {
344 			promisc_forced_on = true;
345 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
346 				    vsi->vsi_num);
347 		} else {
348 			goto out;
349 		}
350 	}
351 	err = 0;
352 	/* check for changes in promiscuous modes */
353 	if (changed_flags & IFF_ALLMULTI) {
354 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
355 			if (vsi->num_vlan > 1)
356 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
357 			else
358 				promisc_m = ICE_MCAST_PROMISC_BITS;
359 
360 			err = ice_set_promisc(vsi, promisc_m);
361 			if (err) {
362 				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
363 					   vsi->vsi_num);
364 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
365 				goto out_promisc;
366 			}
367 		} else {
368 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
369 			if (vsi->num_vlan > 1)
370 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
371 			else
372 				promisc_m = ICE_MCAST_PROMISC_BITS;
373 
374 			err = ice_clear_promisc(vsi, promisc_m);
375 			if (err) {
376 				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
377 					   vsi->vsi_num);
378 				vsi->current_netdev_flags |= IFF_ALLMULTI;
379 				goto out_promisc;
380 			}
381 		}
382 	}
383 
384 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
385 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
386 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
387 		if (vsi->current_netdev_flags & IFF_PROMISC) {
388 			/* Apply Rx filter rule to get traffic from wire */
389 			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
390 				err = ice_set_dflt_vsi(pf->first_sw, vsi);
391 				if (err && err != -EEXIST) {
392 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
393 						   err, vsi->vsi_num);
394 					vsi->current_netdev_flags &=
395 						~IFF_PROMISC;
396 					goto out_promisc;
397 				}
398 				err = 0;
399 				ice_cfg_vlan_pruning(vsi, false);
400 			}
401 		} else {
402 			/* Clear Rx filter to remove traffic from wire */
403 			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
404 				err = ice_clear_dflt_vsi(pf->first_sw);
405 				if (err) {
406 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
407 						   err, vsi->vsi_num);
408 					vsi->current_netdev_flags |=
409 						IFF_PROMISC;
410 					goto out_promisc;
411 				}
412 				if (vsi->num_vlan > 1)
413 					ice_cfg_vlan_pruning(vsi, true);
414 			}
415 		}
416 	}
417 	goto exit;
418 
419 out_promisc:
420 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
421 	goto exit;
422 out:
423 	/* if something went wrong then set the changed flag so we try again */
424 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
425 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
426 exit:
427 	clear_bit(ICE_CFG_BUSY, vsi->state);
428 	return err;
429 }
430 
431 /**
432  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
433  * @pf: board private structure
434  */
435 static void ice_sync_fltr_subtask(struct ice_pf *pf)
436 {
437 	int v;
438 
439 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
440 		return;
441 
442 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
443 
444 	ice_for_each_vsi(pf, v)
445 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
446 		    ice_vsi_sync_fltr(pf->vsi[v])) {
447 			/* come back and try again later */
448 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
449 			break;
450 		}
451 }
452 
453 /**
454  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
455  * @pf: the PF
456  * @locked: is the rtnl_lock already held
457  */
458 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
459 {
460 	int node;
461 	int v;
462 
463 	ice_for_each_vsi(pf, v)
464 		if (pf->vsi[v])
465 			ice_dis_vsi(pf->vsi[v], locked);
466 
467 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
468 		pf->pf_agg_node[node].num_vsis = 0;
469 
470 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
471 		pf->vf_agg_node[node].num_vsis = 0;
472 }
473 
474 /**
475  * ice_clear_sw_switch_recipes - clear switch recipes
476  * @pf: board private structure
477  *
478  * Mark switch recipes as not created in sw structures. There are cases where
479  * rules (especially advanced rules) need to be restored, either re-read from
480  * hardware or added again. For example after the reset. 'recp_created' flag
481  * prevents from doing that and need to be cleared upfront.
482  */
483 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
484 {
485 	struct ice_sw_recipe *recp;
486 	u8 i;
487 
488 	recp = pf->hw.switch_info->recp_list;
489 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
490 		recp[i].recp_created = false;
491 }
492 
493 /**
494  * ice_prepare_for_reset - prep for reset
495  * @pf: board private structure
496  * @reset_type: reset type requested
497  *
498  * Inform or close all dependent features in prep for reset.
499  */
500 static void
501 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
502 {
503 	struct ice_hw *hw = &pf->hw;
504 	struct ice_vsi *vsi;
505 	unsigned int i;
506 
507 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
508 
509 	/* already prepared for reset */
510 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
511 		return;
512 
513 	ice_unplug_aux_dev(pf);
514 
515 	/* Notify VFs of impending reset */
516 	if (ice_check_sq_alive(hw, &hw->mailboxq))
517 		ice_vc_notify_reset(pf);
518 
519 	/* Disable VFs until reset is completed */
520 	ice_for_each_vf(pf, i)
521 		ice_set_vf_state_qs_dis(&pf->vf[i]);
522 
523 	if (ice_is_eswitch_mode_switchdev(pf)) {
524 		if (reset_type != ICE_RESET_PFR)
525 			ice_clear_sw_switch_recipes(pf);
526 	}
527 
528 	/* release ADQ specific HW and SW resources */
529 	vsi = ice_get_main_vsi(pf);
530 	if (!vsi)
531 		goto skip;
532 
533 	/* to be on safe side, reset orig_rss_size so that normal flow
534 	 * of deciding rss_size can take precedence
535 	 */
536 	vsi->orig_rss_size = 0;
537 
538 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
539 		if (reset_type == ICE_RESET_PFR) {
540 			vsi->old_ena_tc = vsi->all_enatc;
541 			vsi->old_numtc = vsi->all_numtc;
542 		} else {
543 			ice_remove_q_channels(vsi, true);
544 
545 			/* for other reset type, do not support channel rebuild
546 			 * hence reset needed info
547 			 */
548 			vsi->old_ena_tc = 0;
549 			vsi->all_enatc = 0;
550 			vsi->old_numtc = 0;
551 			vsi->all_numtc = 0;
552 			vsi->req_txq = 0;
553 			vsi->req_rxq = 0;
554 			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
555 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
556 		}
557 	}
558 skip:
559 
560 	/* clear SW filtering DB */
561 	ice_clear_hw_tbls(hw);
562 	/* disable the VSIs and their queues that are not already DOWN */
563 	ice_pf_dis_all_vsi(pf, false);
564 
565 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
566 		ice_ptp_prepare_for_reset(pf);
567 
568 	if (hw->port_info)
569 		ice_sched_clear_port(hw->port_info);
570 
571 	ice_shutdown_all_ctrlq(hw);
572 
573 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
574 }
575 
576 /**
577  * ice_do_reset - Initiate one of many types of resets
578  * @pf: board private structure
579  * @reset_type: reset type requested before this function was called.
580  */
581 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
582 {
583 	struct device *dev = ice_pf_to_dev(pf);
584 	struct ice_hw *hw = &pf->hw;
585 
586 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
587 
588 	ice_prepare_for_reset(pf, reset_type);
589 
590 	/* trigger the reset */
591 	if (ice_reset(hw, reset_type)) {
592 		dev_err(dev, "reset %d failed\n", reset_type);
593 		set_bit(ICE_RESET_FAILED, pf->state);
594 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
595 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
596 		clear_bit(ICE_PFR_REQ, pf->state);
597 		clear_bit(ICE_CORER_REQ, pf->state);
598 		clear_bit(ICE_GLOBR_REQ, pf->state);
599 		wake_up(&pf->reset_wait_queue);
600 		return;
601 	}
602 
603 	/* PFR is a bit of a special case because it doesn't result in an OICR
604 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
605 	 * associated state bits.
606 	 */
607 	if (reset_type == ICE_RESET_PFR) {
608 		pf->pfr_count++;
609 		ice_rebuild(pf, reset_type);
610 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
611 		clear_bit(ICE_PFR_REQ, pf->state);
612 		wake_up(&pf->reset_wait_queue);
613 		ice_reset_all_vfs(pf, true);
614 	}
615 }
616 
617 /**
618  * ice_reset_subtask - Set up for resetting the device and driver
619  * @pf: board private structure
620  */
621 static void ice_reset_subtask(struct ice_pf *pf)
622 {
623 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
624 
625 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
626 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
627 	 * of reset is pending and sets bits in pf->state indicating the reset
628 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
629 	 * prepare for pending reset if not already (for PF software-initiated
630 	 * global resets the software should already be prepared for it as
631 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
632 	 * by firmware or software on other PFs, that bit is not set so prepare
633 	 * for the reset now), poll for reset done, rebuild and return.
634 	 */
635 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
636 		/* Perform the largest reset requested */
637 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
638 			reset_type = ICE_RESET_CORER;
639 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
640 			reset_type = ICE_RESET_GLOBR;
641 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
642 			reset_type = ICE_RESET_EMPR;
643 		/* return if no valid reset type requested */
644 		if (reset_type == ICE_RESET_INVAL)
645 			return;
646 		ice_prepare_for_reset(pf, reset_type);
647 
648 		/* make sure we are ready to rebuild */
649 		if (ice_check_reset(&pf->hw)) {
650 			set_bit(ICE_RESET_FAILED, pf->state);
651 		} else {
652 			/* done with reset. start rebuild */
653 			pf->hw.reset_ongoing = false;
654 			ice_rebuild(pf, reset_type);
655 			/* clear bit to resume normal operations, but
656 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
657 			 */
658 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
659 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
660 			clear_bit(ICE_PFR_REQ, pf->state);
661 			clear_bit(ICE_CORER_REQ, pf->state);
662 			clear_bit(ICE_GLOBR_REQ, pf->state);
663 			wake_up(&pf->reset_wait_queue);
664 			ice_reset_all_vfs(pf, true);
665 		}
666 
667 		return;
668 	}
669 
670 	/* No pending resets to finish processing. Check for new resets */
671 	if (test_bit(ICE_PFR_REQ, pf->state))
672 		reset_type = ICE_RESET_PFR;
673 	if (test_bit(ICE_CORER_REQ, pf->state))
674 		reset_type = ICE_RESET_CORER;
675 	if (test_bit(ICE_GLOBR_REQ, pf->state))
676 		reset_type = ICE_RESET_GLOBR;
677 	/* If no valid reset type requested just return */
678 	if (reset_type == ICE_RESET_INVAL)
679 		return;
680 
681 	/* reset if not already down or busy */
682 	if (!test_bit(ICE_DOWN, pf->state) &&
683 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
684 		ice_do_reset(pf, reset_type);
685 	}
686 }
687 
688 /**
689  * ice_print_topo_conflict - print topology conflict message
690  * @vsi: the VSI whose topology status is being checked
691  */
692 static void ice_print_topo_conflict(struct ice_vsi *vsi)
693 {
694 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
695 	case ICE_AQ_LINK_TOPO_CONFLICT:
696 	case ICE_AQ_LINK_MEDIA_CONFLICT:
697 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
698 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
699 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
700 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
701 		break;
702 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
703 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
704 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
705 		else
706 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
707 		break;
708 	default:
709 		break;
710 	}
711 }
712 
713 /**
714  * ice_print_link_msg - print link up or down message
715  * @vsi: the VSI whose link status is being queried
716  * @isup: boolean for if the link is now up or down
717  */
718 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
719 {
720 	struct ice_aqc_get_phy_caps_data *caps;
721 	const char *an_advertised;
722 	const char *fec_req;
723 	const char *speed;
724 	const char *fec;
725 	const char *fc;
726 	const char *an;
727 	int status;
728 
729 	if (!vsi)
730 		return;
731 
732 	if (vsi->current_isup == isup)
733 		return;
734 
735 	vsi->current_isup = isup;
736 
737 	if (!isup) {
738 		netdev_info(vsi->netdev, "NIC Link is Down\n");
739 		return;
740 	}
741 
742 	switch (vsi->port_info->phy.link_info.link_speed) {
743 	case ICE_AQ_LINK_SPEED_100GB:
744 		speed = "100 G";
745 		break;
746 	case ICE_AQ_LINK_SPEED_50GB:
747 		speed = "50 G";
748 		break;
749 	case ICE_AQ_LINK_SPEED_40GB:
750 		speed = "40 G";
751 		break;
752 	case ICE_AQ_LINK_SPEED_25GB:
753 		speed = "25 G";
754 		break;
755 	case ICE_AQ_LINK_SPEED_20GB:
756 		speed = "20 G";
757 		break;
758 	case ICE_AQ_LINK_SPEED_10GB:
759 		speed = "10 G";
760 		break;
761 	case ICE_AQ_LINK_SPEED_5GB:
762 		speed = "5 G";
763 		break;
764 	case ICE_AQ_LINK_SPEED_2500MB:
765 		speed = "2.5 G";
766 		break;
767 	case ICE_AQ_LINK_SPEED_1000MB:
768 		speed = "1 G";
769 		break;
770 	case ICE_AQ_LINK_SPEED_100MB:
771 		speed = "100 M";
772 		break;
773 	default:
774 		speed = "Unknown ";
775 		break;
776 	}
777 
778 	switch (vsi->port_info->fc.current_mode) {
779 	case ICE_FC_FULL:
780 		fc = "Rx/Tx";
781 		break;
782 	case ICE_FC_TX_PAUSE:
783 		fc = "Tx";
784 		break;
785 	case ICE_FC_RX_PAUSE:
786 		fc = "Rx";
787 		break;
788 	case ICE_FC_NONE:
789 		fc = "None";
790 		break;
791 	default:
792 		fc = "Unknown";
793 		break;
794 	}
795 
796 	/* Get FEC mode based on negotiated link info */
797 	switch (vsi->port_info->phy.link_info.fec_info) {
798 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
799 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
800 		fec = "RS-FEC";
801 		break;
802 	case ICE_AQ_LINK_25G_KR_FEC_EN:
803 		fec = "FC-FEC/BASE-R";
804 		break;
805 	default:
806 		fec = "NONE";
807 		break;
808 	}
809 
810 	/* check if autoneg completed, might be false due to not supported */
811 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
812 		an = "True";
813 	else
814 		an = "False";
815 
816 	/* Get FEC mode requested based on PHY caps last SW configuration */
817 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
818 	if (!caps) {
819 		fec_req = "Unknown";
820 		an_advertised = "Unknown";
821 		goto done;
822 	}
823 
824 	status = ice_aq_get_phy_caps(vsi->port_info, false,
825 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
826 	if (status)
827 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
828 
829 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
830 
831 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
832 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
833 		fec_req = "RS-FEC";
834 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
835 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
836 		fec_req = "FC-FEC/BASE-R";
837 	else
838 		fec_req = "NONE";
839 
840 	kfree(caps);
841 
842 done:
843 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
844 		    speed, fec_req, fec, an_advertised, an, fc);
845 	ice_print_topo_conflict(vsi);
846 }
847 
848 /**
849  * ice_vsi_link_event - update the VSI's netdev
850  * @vsi: the VSI on which the link event occurred
851  * @link_up: whether or not the VSI needs to be set up or down
852  */
853 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
854 {
855 	if (!vsi)
856 		return;
857 
858 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
859 		return;
860 
861 	if (vsi->type == ICE_VSI_PF) {
862 		if (link_up == netif_carrier_ok(vsi->netdev))
863 			return;
864 
865 		if (link_up) {
866 			netif_carrier_on(vsi->netdev);
867 			netif_tx_wake_all_queues(vsi->netdev);
868 		} else {
869 			netif_carrier_off(vsi->netdev);
870 			netif_tx_stop_all_queues(vsi->netdev);
871 		}
872 	}
873 }
874 
875 /**
876  * ice_set_dflt_mib - send a default config MIB to the FW
877  * @pf: private PF struct
878  *
879  * This function sends a default configuration MIB to the FW.
880  *
881  * If this function errors out at any point, the driver is still able to
882  * function.  The main impact is that LFC may not operate as expected.
883  * Therefore an error state in this function should be treated with a DBG
884  * message and continue on with driver rebuild/reenable.
885  */
886 static void ice_set_dflt_mib(struct ice_pf *pf)
887 {
888 	struct device *dev = ice_pf_to_dev(pf);
889 	u8 mib_type, *buf, *lldpmib = NULL;
890 	u16 len, typelen, offset = 0;
891 	struct ice_lldp_org_tlv *tlv;
892 	struct ice_hw *hw = &pf->hw;
893 	u32 ouisubtype;
894 
895 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
896 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
897 	if (!lldpmib) {
898 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
899 			__func__);
900 		return;
901 	}
902 
903 	/* Add ETS CFG TLV */
904 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
905 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
906 		   ICE_IEEE_ETS_TLV_LEN);
907 	tlv->typelen = htons(typelen);
908 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
909 		      ICE_IEEE_SUBTYPE_ETS_CFG);
910 	tlv->ouisubtype = htonl(ouisubtype);
911 
912 	buf = tlv->tlvinfo;
913 	buf[0] = 0;
914 
915 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
916 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
917 	 * Octets 13 - 20 are TSA values - leave as zeros
918 	 */
919 	buf[5] = 0x64;
920 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
921 	offset += len + 2;
922 	tlv = (struct ice_lldp_org_tlv *)
923 		((char *)tlv + sizeof(tlv->typelen) + len);
924 
925 	/* Add ETS REC TLV */
926 	buf = tlv->tlvinfo;
927 	tlv->typelen = htons(typelen);
928 
929 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
930 		      ICE_IEEE_SUBTYPE_ETS_REC);
931 	tlv->ouisubtype = htonl(ouisubtype);
932 
933 	/* First octet of buf is reserved
934 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
935 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
936 	 * Octets 13 - 20 are TSA value - leave as zeros
937 	 */
938 	buf[5] = 0x64;
939 	offset += len + 2;
940 	tlv = (struct ice_lldp_org_tlv *)
941 		((char *)tlv + sizeof(tlv->typelen) + len);
942 
943 	/* Add PFC CFG TLV */
944 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
945 		   ICE_IEEE_PFC_TLV_LEN);
946 	tlv->typelen = htons(typelen);
947 
948 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
949 		      ICE_IEEE_SUBTYPE_PFC_CFG);
950 	tlv->ouisubtype = htonl(ouisubtype);
951 
952 	/* Octet 1 left as all zeros - PFC disabled */
953 	buf[0] = 0x08;
954 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
955 	offset += len + 2;
956 
957 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
958 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
959 
960 	kfree(lldpmib);
961 }
962 
963 /**
964  * ice_check_phy_fw_load - check if PHY FW load failed
965  * @pf: pointer to PF struct
966  * @link_cfg_err: bitmap from the link info structure
967  *
968  * check if external PHY FW load failed and print an error message if it did
969  */
970 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
971 {
972 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
973 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
974 		return;
975 	}
976 
977 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
978 		return;
979 
980 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
981 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
982 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
983 	}
984 }
985 
986 /**
987  * ice_check_module_power
988  * @pf: pointer to PF struct
989  * @link_cfg_err: bitmap from the link info structure
990  *
991  * check module power level returned by a previous call to aq_get_link_info
992  * and print error messages if module power level is not supported
993  */
994 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
995 {
996 	/* if module power level is supported, clear the flag */
997 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
998 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
999 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1000 		return;
1001 	}
1002 
1003 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1004 	 * above block didn't clear this bit, there's nothing to do
1005 	 */
1006 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1007 		return;
1008 
1009 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1010 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1011 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1012 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1013 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1014 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1015 	}
1016 }
1017 
1018 /**
1019  * ice_check_link_cfg_err - check if link configuration failed
1020  * @pf: pointer to the PF struct
1021  * @link_cfg_err: bitmap from the link info structure
1022  *
1023  * print if any link configuration failure happens due to the value in the
1024  * link_cfg_err parameter in the link info structure
1025  */
1026 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1027 {
1028 	ice_check_module_power(pf, link_cfg_err);
1029 	ice_check_phy_fw_load(pf, link_cfg_err);
1030 }
1031 
1032 /**
1033  * ice_link_event - process the link event
1034  * @pf: PF that the link event is associated with
1035  * @pi: port_info for the port that the link event is associated with
1036  * @link_up: true if the physical link is up and false if it is down
1037  * @link_speed: current link speed received from the link event
1038  *
1039  * Returns 0 on success and negative on failure
1040  */
1041 static int
1042 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1043 	       u16 link_speed)
1044 {
1045 	struct device *dev = ice_pf_to_dev(pf);
1046 	struct ice_phy_info *phy_info;
1047 	struct ice_vsi *vsi;
1048 	u16 old_link_speed;
1049 	bool old_link;
1050 	int status;
1051 
1052 	phy_info = &pi->phy;
1053 	phy_info->link_info_old = phy_info->link_info;
1054 
1055 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1056 	old_link_speed = phy_info->link_info_old.link_speed;
1057 
1058 	/* update the link info structures and re-enable link events,
1059 	 * don't bail on failure due to other book keeping needed
1060 	 */
1061 	status = ice_update_link_info(pi);
1062 	if (status)
1063 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1064 			pi->lport, status,
1065 			ice_aq_str(pi->hw->adminq.sq_last_status));
1066 
1067 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1068 
1069 	/* Check if the link state is up after updating link info, and treat
1070 	 * this event as an UP event since the link is actually UP now.
1071 	 */
1072 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1073 		link_up = true;
1074 
1075 	vsi = ice_get_main_vsi(pf);
1076 	if (!vsi || !vsi->port_info)
1077 		return -EINVAL;
1078 
1079 	/* turn off PHY if media was removed */
1080 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1081 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1082 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1083 		ice_set_link(vsi, false);
1084 	}
1085 
1086 	/* if the old link up/down and speed is the same as the new */
1087 	if (link_up == old_link && link_speed == old_link_speed)
1088 		return 0;
1089 
1090 	if (!ice_is_e810(&pf->hw))
1091 		ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1092 
1093 	if (ice_is_dcb_active(pf)) {
1094 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1095 			ice_dcb_rebuild(pf);
1096 	} else {
1097 		if (link_up)
1098 			ice_set_dflt_mib(pf);
1099 	}
1100 	ice_vsi_link_event(vsi, link_up);
1101 	ice_print_link_msg(vsi, link_up);
1102 
1103 	ice_vc_notify_link_state(pf);
1104 
1105 	return 0;
1106 }
1107 
1108 /**
1109  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1110  * @pf: board private structure
1111  */
1112 static void ice_watchdog_subtask(struct ice_pf *pf)
1113 {
1114 	int i;
1115 
1116 	/* if interface is down do nothing */
1117 	if (test_bit(ICE_DOWN, pf->state) ||
1118 	    test_bit(ICE_CFG_BUSY, pf->state))
1119 		return;
1120 
1121 	/* make sure we don't do these things too often */
1122 	if (time_before(jiffies,
1123 			pf->serv_tmr_prev + pf->serv_tmr_period))
1124 		return;
1125 
1126 	pf->serv_tmr_prev = jiffies;
1127 
1128 	/* Update the stats for active netdevs so the network stack
1129 	 * can look at updated numbers whenever it cares to
1130 	 */
1131 	ice_update_pf_stats(pf);
1132 	ice_for_each_vsi(pf, i)
1133 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1134 			ice_update_vsi_stats(pf->vsi[i]);
1135 }
1136 
1137 /**
1138  * ice_init_link_events - enable/initialize link events
1139  * @pi: pointer to the port_info instance
1140  *
1141  * Returns -EIO on failure, 0 on success
1142  */
1143 static int ice_init_link_events(struct ice_port_info *pi)
1144 {
1145 	u16 mask;
1146 
1147 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1148 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1149 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1150 
1151 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1152 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1153 			pi->lport);
1154 		return -EIO;
1155 	}
1156 
1157 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1158 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1159 			pi->lport);
1160 		return -EIO;
1161 	}
1162 
1163 	return 0;
1164 }
1165 
1166 /**
1167  * ice_handle_link_event - handle link event via ARQ
1168  * @pf: PF that the link event is associated with
1169  * @event: event structure containing link status info
1170  */
1171 static int
1172 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1173 {
1174 	struct ice_aqc_get_link_status_data *link_data;
1175 	struct ice_port_info *port_info;
1176 	int status;
1177 
1178 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1179 	port_info = pf->hw.port_info;
1180 	if (!port_info)
1181 		return -EINVAL;
1182 
1183 	status = ice_link_event(pf, port_info,
1184 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1185 				le16_to_cpu(link_data->link_speed));
1186 	if (status)
1187 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1188 			status);
1189 
1190 	return status;
1191 }
1192 
1193 enum ice_aq_task_state {
1194 	ICE_AQ_TASK_WAITING = 0,
1195 	ICE_AQ_TASK_COMPLETE,
1196 	ICE_AQ_TASK_CANCELED,
1197 };
1198 
1199 struct ice_aq_task {
1200 	struct hlist_node entry;
1201 
1202 	u16 opcode;
1203 	struct ice_rq_event_info *event;
1204 	enum ice_aq_task_state state;
1205 };
1206 
1207 /**
1208  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1209  * @pf: pointer to the PF private structure
1210  * @opcode: the opcode to wait for
1211  * @timeout: how long to wait, in jiffies
1212  * @event: storage for the event info
1213  *
1214  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1215  * current thread will be put to sleep until the specified event occurs or
1216  * until the given timeout is reached.
1217  *
1218  * To obtain only the descriptor contents, pass an event without an allocated
1219  * msg_buf. If the complete data buffer is desired, allocate the
1220  * event->msg_buf with enough space ahead of time.
1221  *
1222  * Returns: zero on success, or a negative error code on failure.
1223  */
1224 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1225 			  struct ice_rq_event_info *event)
1226 {
1227 	struct device *dev = ice_pf_to_dev(pf);
1228 	struct ice_aq_task *task;
1229 	unsigned long start;
1230 	long ret;
1231 	int err;
1232 
1233 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1234 	if (!task)
1235 		return -ENOMEM;
1236 
1237 	INIT_HLIST_NODE(&task->entry);
1238 	task->opcode = opcode;
1239 	task->event = event;
1240 	task->state = ICE_AQ_TASK_WAITING;
1241 
1242 	spin_lock_bh(&pf->aq_wait_lock);
1243 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1244 	spin_unlock_bh(&pf->aq_wait_lock);
1245 
1246 	start = jiffies;
1247 
1248 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1249 					       timeout);
1250 	switch (task->state) {
1251 	case ICE_AQ_TASK_WAITING:
1252 		err = ret < 0 ? ret : -ETIMEDOUT;
1253 		break;
1254 	case ICE_AQ_TASK_CANCELED:
1255 		err = ret < 0 ? ret : -ECANCELED;
1256 		break;
1257 	case ICE_AQ_TASK_COMPLETE:
1258 		err = ret < 0 ? ret : 0;
1259 		break;
1260 	default:
1261 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1262 		err = -EINVAL;
1263 		break;
1264 	}
1265 
1266 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1267 		jiffies_to_msecs(jiffies - start),
1268 		jiffies_to_msecs(timeout),
1269 		opcode);
1270 
1271 	spin_lock_bh(&pf->aq_wait_lock);
1272 	hlist_del(&task->entry);
1273 	spin_unlock_bh(&pf->aq_wait_lock);
1274 	kfree(task);
1275 
1276 	return err;
1277 }
1278 
1279 /**
1280  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1281  * @pf: pointer to the PF private structure
1282  * @opcode: the opcode of the event
1283  * @event: the event to check
1284  *
1285  * Loops over the current list of pending threads waiting for an AdminQ event.
1286  * For each matching task, copy the contents of the event into the task
1287  * structure and wake up the thread.
1288  *
1289  * If multiple threads wait for the same opcode, they will all be woken up.
1290  *
1291  * Note that event->msg_buf will only be duplicated if the event has a buffer
1292  * with enough space already allocated. Otherwise, only the descriptor and
1293  * message length will be copied.
1294  *
1295  * Returns: true if an event was found, false otherwise
1296  */
1297 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1298 				struct ice_rq_event_info *event)
1299 {
1300 	struct ice_aq_task *task;
1301 	bool found = false;
1302 
1303 	spin_lock_bh(&pf->aq_wait_lock);
1304 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1305 		if (task->state || task->opcode != opcode)
1306 			continue;
1307 
1308 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1309 		task->event->msg_len = event->msg_len;
1310 
1311 		/* Only copy the data buffer if a destination was set */
1312 		if (task->event->msg_buf &&
1313 		    task->event->buf_len > event->buf_len) {
1314 			memcpy(task->event->msg_buf, event->msg_buf,
1315 			       event->buf_len);
1316 			task->event->buf_len = event->buf_len;
1317 		}
1318 
1319 		task->state = ICE_AQ_TASK_COMPLETE;
1320 		found = true;
1321 	}
1322 	spin_unlock_bh(&pf->aq_wait_lock);
1323 
1324 	if (found)
1325 		wake_up(&pf->aq_wait_queue);
1326 }
1327 
1328 /**
1329  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1330  * @pf: the PF private structure
1331  *
1332  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1333  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1334  */
1335 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1336 {
1337 	struct ice_aq_task *task;
1338 
1339 	spin_lock_bh(&pf->aq_wait_lock);
1340 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1341 		task->state = ICE_AQ_TASK_CANCELED;
1342 	spin_unlock_bh(&pf->aq_wait_lock);
1343 
1344 	wake_up(&pf->aq_wait_queue);
1345 }
1346 
1347 /**
1348  * __ice_clean_ctrlq - helper function to clean controlq rings
1349  * @pf: ptr to struct ice_pf
1350  * @q_type: specific Control queue type
1351  */
1352 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1353 {
1354 	struct device *dev = ice_pf_to_dev(pf);
1355 	struct ice_rq_event_info event;
1356 	struct ice_hw *hw = &pf->hw;
1357 	struct ice_ctl_q_info *cq;
1358 	u16 pending, i = 0;
1359 	const char *qtype;
1360 	u32 oldval, val;
1361 
1362 	/* Do not clean control queue if/when PF reset fails */
1363 	if (test_bit(ICE_RESET_FAILED, pf->state))
1364 		return 0;
1365 
1366 	switch (q_type) {
1367 	case ICE_CTL_Q_ADMIN:
1368 		cq = &hw->adminq;
1369 		qtype = "Admin";
1370 		break;
1371 	case ICE_CTL_Q_SB:
1372 		cq = &hw->sbq;
1373 		qtype = "Sideband";
1374 		break;
1375 	case ICE_CTL_Q_MAILBOX:
1376 		cq = &hw->mailboxq;
1377 		qtype = "Mailbox";
1378 		/* we are going to try to detect a malicious VF, so set the
1379 		 * state to begin detection
1380 		 */
1381 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1382 		break;
1383 	default:
1384 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1385 		return 0;
1386 	}
1387 
1388 	/* check for error indications - PF_xx_AxQLEN register layout for
1389 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1390 	 */
1391 	val = rd32(hw, cq->rq.len);
1392 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1393 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1394 		oldval = val;
1395 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1396 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1397 				qtype);
1398 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1399 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1400 				qtype);
1401 		}
1402 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1403 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1404 				qtype);
1405 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1406 			 PF_FW_ARQLEN_ARQCRIT_M);
1407 		if (oldval != val)
1408 			wr32(hw, cq->rq.len, val);
1409 	}
1410 
1411 	val = rd32(hw, cq->sq.len);
1412 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1413 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1414 		oldval = val;
1415 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1416 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1417 				qtype);
1418 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1419 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1420 				qtype);
1421 		}
1422 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1423 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1424 				qtype);
1425 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1426 			 PF_FW_ATQLEN_ATQCRIT_M);
1427 		if (oldval != val)
1428 			wr32(hw, cq->sq.len, val);
1429 	}
1430 
1431 	event.buf_len = cq->rq_buf_size;
1432 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1433 	if (!event.msg_buf)
1434 		return 0;
1435 
1436 	do {
1437 		u16 opcode;
1438 		int ret;
1439 
1440 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1441 		if (ret == -EALREADY)
1442 			break;
1443 		if (ret) {
1444 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1445 				ret);
1446 			break;
1447 		}
1448 
1449 		opcode = le16_to_cpu(event.desc.opcode);
1450 
1451 		/* Notify any thread that might be waiting for this event */
1452 		ice_aq_check_events(pf, opcode, &event);
1453 
1454 		switch (opcode) {
1455 		case ice_aqc_opc_get_link_status:
1456 			if (ice_handle_link_event(pf, &event))
1457 				dev_err(dev, "Could not handle link event\n");
1458 			break;
1459 		case ice_aqc_opc_event_lan_overflow:
1460 			ice_vf_lan_overflow_event(pf, &event);
1461 			break;
1462 		case ice_mbx_opc_send_msg_to_pf:
1463 			if (!ice_is_malicious_vf(pf, &event, i, pending))
1464 				ice_vc_process_vf_msg(pf, &event);
1465 			break;
1466 		case ice_aqc_opc_fw_logging:
1467 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1468 			break;
1469 		case ice_aqc_opc_lldp_set_mib_change:
1470 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1471 			break;
1472 		default:
1473 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1474 				qtype, opcode);
1475 			break;
1476 		}
1477 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1478 
1479 	kfree(event.msg_buf);
1480 
1481 	return pending && (i == ICE_DFLT_IRQ_WORK);
1482 }
1483 
1484 /**
1485  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1486  * @hw: pointer to hardware info
1487  * @cq: control queue information
1488  *
1489  * returns true if there are pending messages in a queue, false if there aren't
1490  */
1491 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1492 {
1493 	u16 ntu;
1494 
1495 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1496 	return cq->rq.next_to_clean != ntu;
1497 }
1498 
1499 /**
1500  * ice_clean_adminq_subtask - clean the AdminQ rings
1501  * @pf: board private structure
1502  */
1503 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1504 {
1505 	struct ice_hw *hw = &pf->hw;
1506 
1507 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1508 		return;
1509 
1510 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1511 		return;
1512 
1513 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1514 
1515 	/* There might be a situation where new messages arrive to a control
1516 	 * queue between processing the last message and clearing the
1517 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1518 	 * ice_ctrlq_pending) and process new messages if any.
1519 	 */
1520 	if (ice_ctrlq_pending(hw, &hw->adminq))
1521 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1522 
1523 	ice_flush(hw);
1524 }
1525 
1526 /**
1527  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1528  * @pf: board private structure
1529  */
1530 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1531 {
1532 	struct ice_hw *hw = &pf->hw;
1533 
1534 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1535 		return;
1536 
1537 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1538 		return;
1539 
1540 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1541 
1542 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1543 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1544 
1545 	ice_flush(hw);
1546 }
1547 
1548 /**
1549  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1550  * @pf: board private structure
1551  */
1552 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1553 {
1554 	struct ice_hw *hw = &pf->hw;
1555 
1556 	/* Nothing to do here if sideband queue is not supported */
1557 	if (!ice_is_sbq_supported(hw)) {
1558 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1559 		return;
1560 	}
1561 
1562 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1563 		return;
1564 
1565 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1566 		return;
1567 
1568 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1569 
1570 	if (ice_ctrlq_pending(hw, &hw->sbq))
1571 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1572 
1573 	ice_flush(hw);
1574 }
1575 
1576 /**
1577  * ice_service_task_schedule - schedule the service task to wake up
1578  * @pf: board private structure
1579  *
1580  * If not already scheduled, this puts the task into the work queue.
1581  */
1582 void ice_service_task_schedule(struct ice_pf *pf)
1583 {
1584 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1585 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1586 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1587 		queue_work(ice_wq, &pf->serv_task);
1588 }
1589 
1590 /**
1591  * ice_service_task_complete - finish up the service task
1592  * @pf: board private structure
1593  */
1594 static void ice_service_task_complete(struct ice_pf *pf)
1595 {
1596 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1597 
1598 	/* force memory (pf->state) to sync before next service task */
1599 	smp_mb__before_atomic();
1600 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1601 }
1602 
1603 /**
1604  * ice_service_task_stop - stop service task and cancel works
1605  * @pf: board private structure
1606  *
1607  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1608  * 1 otherwise.
1609  */
1610 static int ice_service_task_stop(struct ice_pf *pf)
1611 {
1612 	int ret;
1613 
1614 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1615 
1616 	if (pf->serv_tmr.function)
1617 		del_timer_sync(&pf->serv_tmr);
1618 	if (pf->serv_task.func)
1619 		cancel_work_sync(&pf->serv_task);
1620 
1621 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1622 	return ret;
1623 }
1624 
1625 /**
1626  * ice_service_task_restart - restart service task and schedule works
1627  * @pf: board private structure
1628  *
1629  * This function is needed for suspend and resume works (e.g WoL scenario)
1630  */
1631 static void ice_service_task_restart(struct ice_pf *pf)
1632 {
1633 	clear_bit(ICE_SERVICE_DIS, pf->state);
1634 	ice_service_task_schedule(pf);
1635 }
1636 
1637 /**
1638  * ice_service_timer - timer callback to schedule service task
1639  * @t: pointer to timer_list
1640  */
1641 static void ice_service_timer(struct timer_list *t)
1642 {
1643 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1644 
1645 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1646 	ice_service_task_schedule(pf);
1647 }
1648 
1649 /**
1650  * ice_handle_mdd_event - handle malicious driver detect event
1651  * @pf: pointer to the PF structure
1652  *
1653  * Called from service task. OICR interrupt handler indicates MDD event.
1654  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1655  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1656  * disable the queue, the PF can be configured to reset the VF using ethtool
1657  * private flag mdd-auto-reset-vf.
1658  */
1659 static void ice_handle_mdd_event(struct ice_pf *pf)
1660 {
1661 	struct device *dev = ice_pf_to_dev(pf);
1662 	struct ice_hw *hw = &pf->hw;
1663 	unsigned int i;
1664 	u32 reg;
1665 
1666 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1667 		/* Since the VF MDD event logging is rate limited, check if
1668 		 * there are pending MDD events.
1669 		 */
1670 		ice_print_vfs_mdd_events(pf);
1671 		return;
1672 	}
1673 
1674 	/* find what triggered an MDD event */
1675 	reg = rd32(hw, GL_MDET_TX_PQM);
1676 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1677 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1678 				GL_MDET_TX_PQM_PF_NUM_S;
1679 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1680 				GL_MDET_TX_PQM_VF_NUM_S;
1681 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1682 				GL_MDET_TX_PQM_MAL_TYPE_S;
1683 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1684 				GL_MDET_TX_PQM_QNUM_S);
1685 
1686 		if (netif_msg_tx_err(pf))
1687 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1688 				 event, queue, pf_num, vf_num);
1689 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1690 	}
1691 
1692 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1693 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1694 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1695 				GL_MDET_TX_TCLAN_PF_NUM_S;
1696 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1697 				GL_MDET_TX_TCLAN_VF_NUM_S;
1698 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1699 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1700 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1701 				GL_MDET_TX_TCLAN_QNUM_S);
1702 
1703 		if (netif_msg_tx_err(pf))
1704 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1705 				 event, queue, pf_num, vf_num);
1706 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1707 	}
1708 
1709 	reg = rd32(hw, GL_MDET_RX);
1710 	if (reg & GL_MDET_RX_VALID_M) {
1711 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1712 				GL_MDET_RX_PF_NUM_S;
1713 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1714 				GL_MDET_RX_VF_NUM_S;
1715 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1716 				GL_MDET_RX_MAL_TYPE_S;
1717 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1718 				GL_MDET_RX_QNUM_S);
1719 
1720 		if (netif_msg_rx_err(pf))
1721 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1722 				 event, queue, pf_num, vf_num);
1723 		wr32(hw, GL_MDET_RX, 0xffffffff);
1724 	}
1725 
1726 	/* check to see if this PF caused an MDD event */
1727 	reg = rd32(hw, PF_MDET_TX_PQM);
1728 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1729 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1730 		if (netif_msg_tx_err(pf))
1731 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1732 	}
1733 
1734 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1735 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1736 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1737 		if (netif_msg_tx_err(pf))
1738 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1739 	}
1740 
1741 	reg = rd32(hw, PF_MDET_RX);
1742 	if (reg & PF_MDET_RX_VALID_M) {
1743 		wr32(hw, PF_MDET_RX, 0xFFFF);
1744 		if (netif_msg_rx_err(pf))
1745 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1746 	}
1747 
1748 	/* Check to see if one of the VFs caused an MDD event, and then
1749 	 * increment counters and set print pending
1750 	 */
1751 	ice_for_each_vf(pf, i) {
1752 		struct ice_vf *vf = &pf->vf[i];
1753 
1754 		reg = rd32(hw, VP_MDET_TX_PQM(i));
1755 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1756 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1757 			vf->mdd_tx_events.count++;
1758 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1759 			if (netif_msg_tx_err(pf))
1760 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1761 					 i);
1762 		}
1763 
1764 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1765 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1766 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1767 			vf->mdd_tx_events.count++;
1768 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1769 			if (netif_msg_tx_err(pf))
1770 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1771 					 i);
1772 		}
1773 
1774 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1775 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1776 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1777 			vf->mdd_tx_events.count++;
1778 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1779 			if (netif_msg_tx_err(pf))
1780 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1781 					 i);
1782 		}
1783 
1784 		reg = rd32(hw, VP_MDET_RX(i));
1785 		if (reg & VP_MDET_RX_VALID_M) {
1786 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1787 			vf->mdd_rx_events.count++;
1788 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1789 			if (netif_msg_rx_err(pf))
1790 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1791 					 i);
1792 
1793 			/* Since the queue is disabled on VF Rx MDD events, the
1794 			 * PF can be configured to reset the VF through ethtool
1795 			 * private flag mdd-auto-reset-vf.
1796 			 */
1797 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1798 				/* VF MDD event counters will be cleared by
1799 				 * reset, so print the event prior to reset.
1800 				 */
1801 				ice_print_vf_rx_mdd_event(vf);
1802 				ice_reset_vf(&pf->vf[i], false);
1803 			}
1804 		}
1805 	}
1806 
1807 	ice_print_vfs_mdd_events(pf);
1808 }
1809 
1810 /**
1811  * ice_force_phys_link_state - Force the physical link state
1812  * @vsi: VSI to force the physical link state to up/down
1813  * @link_up: true/false indicates to set the physical link to up/down
1814  *
1815  * Force the physical link state by getting the current PHY capabilities from
1816  * hardware and setting the PHY config based on the determined capabilities. If
1817  * link changes a link event will be triggered because both the Enable Automatic
1818  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1819  *
1820  * Returns 0 on success, negative on failure
1821  */
1822 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1823 {
1824 	struct ice_aqc_get_phy_caps_data *pcaps;
1825 	struct ice_aqc_set_phy_cfg_data *cfg;
1826 	struct ice_port_info *pi;
1827 	struct device *dev;
1828 	int retcode;
1829 
1830 	if (!vsi || !vsi->port_info || !vsi->back)
1831 		return -EINVAL;
1832 	if (vsi->type != ICE_VSI_PF)
1833 		return 0;
1834 
1835 	dev = ice_pf_to_dev(vsi->back);
1836 
1837 	pi = vsi->port_info;
1838 
1839 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1840 	if (!pcaps)
1841 		return -ENOMEM;
1842 
1843 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1844 				      NULL);
1845 	if (retcode) {
1846 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1847 			vsi->vsi_num, retcode);
1848 		retcode = -EIO;
1849 		goto out;
1850 	}
1851 
1852 	/* No change in link */
1853 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1854 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1855 		goto out;
1856 
1857 	/* Use the current user PHY configuration. The current user PHY
1858 	 * configuration is initialized during probe from PHY capabilities
1859 	 * software mode, and updated on set PHY configuration.
1860 	 */
1861 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1862 	if (!cfg) {
1863 		retcode = -ENOMEM;
1864 		goto out;
1865 	}
1866 
1867 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1868 	if (link_up)
1869 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1870 	else
1871 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1872 
1873 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1874 	if (retcode) {
1875 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1876 			vsi->vsi_num, retcode);
1877 		retcode = -EIO;
1878 	}
1879 
1880 	kfree(cfg);
1881 out:
1882 	kfree(pcaps);
1883 	return retcode;
1884 }
1885 
1886 /**
1887  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1888  * @pi: port info structure
1889  *
1890  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1891  */
1892 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1893 {
1894 	struct ice_aqc_get_phy_caps_data *pcaps;
1895 	struct ice_pf *pf = pi->hw->back;
1896 	int err;
1897 
1898 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1899 	if (!pcaps)
1900 		return -ENOMEM;
1901 
1902 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1903 				  pcaps, NULL);
1904 
1905 	if (err) {
1906 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1907 		goto out;
1908 	}
1909 
1910 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1911 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1912 
1913 out:
1914 	kfree(pcaps);
1915 	return err;
1916 }
1917 
1918 /**
1919  * ice_init_link_dflt_override - Initialize link default override
1920  * @pi: port info structure
1921  *
1922  * Initialize link default override and PHY total port shutdown during probe
1923  */
1924 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1925 {
1926 	struct ice_link_default_override_tlv *ldo;
1927 	struct ice_pf *pf = pi->hw->back;
1928 
1929 	ldo = &pf->link_dflt_override;
1930 	if (ice_get_link_default_override(ldo, pi))
1931 		return;
1932 
1933 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1934 		return;
1935 
1936 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1937 	 * ethtool private flag) for ports with Port Disable bit set.
1938 	 */
1939 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1940 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1941 }
1942 
1943 /**
1944  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1945  * @pi: port info structure
1946  *
1947  * If default override is enabled, initialize the user PHY cfg speed and FEC
1948  * settings using the default override mask from the NVM.
1949  *
1950  * The PHY should only be configured with the default override settings the
1951  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1952  * is used to indicate that the user PHY cfg default override is initialized
1953  * and the PHY has not been configured with the default override settings. The
1954  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1955  * configured.
1956  *
1957  * This function should be called only if the FW doesn't support default
1958  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1959  */
1960 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1961 {
1962 	struct ice_link_default_override_tlv *ldo;
1963 	struct ice_aqc_set_phy_cfg_data *cfg;
1964 	struct ice_phy_info *phy = &pi->phy;
1965 	struct ice_pf *pf = pi->hw->back;
1966 
1967 	ldo = &pf->link_dflt_override;
1968 
1969 	/* If link default override is enabled, use to mask NVM PHY capabilities
1970 	 * for speed and FEC default configuration.
1971 	 */
1972 	cfg = &phy->curr_user_phy_cfg;
1973 
1974 	if (ldo->phy_type_low || ldo->phy_type_high) {
1975 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1976 				    cpu_to_le64(ldo->phy_type_low);
1977 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1978 				     cpu_to_le64(ldo->phy_type_high);
1979 	}
1980 	cfg->link_fec_opt = ldo->fec_options;
1981 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1982 
1983 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1984 }
1985 
1986 /**
1987  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1988  * @pi: port info structure
1989  *
1990  * Initialize the current user PHY configuration, speed, FEC, and FC requested
1991  * mode to default. The PHY defaults are from get PHY capabilities topology
1992  * with media so call when media is first available. An error is returned if
1993  * called when media is not available. The PHY initialization completed state is
1994  * set here.
1995  *
1996  * These configurations are used when setting PHY
1997  * configuration. The user PHY configuration is updated on set PHY
1998  * configuration. Returns 0 on success, negative on failure
1999  */
2000 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2001 {
2002 	struct ice_aqc_get_phy_caps_data *pcaps;
2003 	struct ice_phy_info *phy = &pi->phy;
2004 	struct ice_pf *pf = pi->hw->back;
2005 	int err;
2006 
2007 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2008 		return -EIO;
2009 
2010 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2011 	if (!pcaps)
2012 		return -ENOMEM;
2013 
2014 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2015 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2016 					  pcaps, NULL);
2017 	else
2018 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2019 					  pcaps, NULL);
2020 	if (err) {
2021 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2022 		goto err_out;
2023 	}
2024 
2025 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2026 
2027 	/* check if lenient mode is supported and enabled */
2028 	if (ice_fw_supports_link_override(pi->hw) &&
2029 	    !(pcaps->module_compliance_enforcement &
2030 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2031 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2032 
2033 		/* if the FW supports default PHY configuration mode, then the driver
2034 		 * does not have to apply link override settings. If not,
2035 		 * initialize user PHY configuration with link override values
2036 		 */
2037 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2038 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2039 			ice_init_phy_cfg_dflt_override(pi);
2040 			goto out;
2041 		}
2042 	}
2043 
2044 	/* if link default override is not enabled, set user flow control and
2045 	 * FEC settings based on what get_phy_caps returned
2046 	 */
2047 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2048 						      pcaps->link_fec_options);
2049 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2050 
2051 out:
2052 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2053 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2054 err_out:
2055 	kfree(pcaps);
2056 	return err;
2057 }
2058 
2059 /**
2060  * ice_configure_phy - configure PHY
2061  * @vsi: VSI of PHY
2062  *
2063  * Set the PHY configuration. If the current PHY configuration is the same as
2064  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2065  * configure the based get PHY capabilities for topology with media.
2066  */
2067 static int ice_configure_phy(struct ice_vsi *vsi)
2068 {
2069 	struct device *dev = ice_pf_to_dev(vsi->back);
2070 	struct ice_port_info *pi = vsi->port_info;
2071 	struct ice_aqc_get_phy_caps_data *pcaps;
2072 	struct ice_aqc_set_phy_cfg_data *cfg;
2073 	struct ice_phy_info *phy = &pi->phy;
2074 	struct ice_pf *pf = vsi->back;
2075 	int err;
2076 
2077 	/* Ensure we have media as we cannot configure a medialess port */
2078 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2079 		return -EPERM;
2080 
2081 	ice_print_topo_conflict(vsi);
2082 
2083 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2084 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2085 		return -EPERM;
2086 
2087 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2088 		return ice_force_phys_link_state(vsi, true);
2089 
2090 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2091 	if (!pcaps)
2092 		return -ENOMEM;
2093 
2094 	/* Get current PHY config */
2095 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2096 				  NULL);
2097 	if (err) {
2098 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2099 			vsi->vsi_num, err);
2100 		goto done;
2101 	}
2102 
2103 	/* If PHY enable link is configured and configuration has not changed,
2104 	 * there's nothing to do
2105 	 */
2106 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2107 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2108 		goto done;
2109 
2110 	/* Use PHY topology as baseline for configuration */
2111 	memset(pcaps, 0, sizeof(*pcaps));
2112 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2113 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2114 					  pcaps, NULL);
2115 	else
2116 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2117 					  pcaps, NULL);
2118 	if (err) {
2119 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2120 			vsi->vsi_num, err);
2121 		goto done;
2122 	}
2123 
2124 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2125 	if (!cfg) {
2126 		err = -ENOMEM;
2127 		goto done;
2128 	}
2129 
2130 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2131 
2132 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2133 	 * ice_init_phy_user_cfg_ldo.
2134 	 */
2135 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2136 			       vsi->back->state)) {
2137 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2138 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2139 	} else {
2140 		u64 phy_low = 0, phy_high = 0;
2141 
2142 		ice_update_phy_type(&phy_low, &phy_high,
2143 				    pi->phy.curr_user_speed_req);
2144 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2145 		cfg->phy_type_high = pcaps->phy_type_high &
2146 				     cpu_to_le64(phy_high);
2147 	}
2148 
2149 	/* Can't provide what was requested; use PHY capabilities */
2150 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2151 		cfg->phy_type_low = pcaps->phy_type_low;
2152 		cfg->phy_type_high = pcaps->phy_type_high;
2153 	}
2154 
2155 	/* FEC */
2156 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2157 
2158 	/* Can't provide what was requested; use PHY capabilities */
2159 	if (cfg->link_fec_opt !=
2160 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2161 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2162 		cfg->link_fec_opt = pcaps->link_fec_options;
2163 	}
2164 
2165 	/* Flow Control - always supported; no need to check against
2166 	 * capabilities
2167 	 */
2168 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2169 
2170 	/* Enable link and link update */
2171 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2172 
2173 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2174 	if (err)
2175 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2176 			vsi->vsi_num, err);
2177 
2178 	kfree(cfg);
2179 done:
2180 	kfree(pcaps);
2181 	return err;
2182 }
2183 
2184 /**
2185  * ice_check_media_subtask - Check for media
2186  * @pf: pointer to PF struct
2187  *
2188  * If media is available, then initialize PHY user configuration if it is not
2189  * been, and configure the PHY if the interface is up.
2190  */
2191 static void ice_check_media_subtask(struct ice_pf *pf)
2192 {
2193 	struct ice_port_info *pi;
2194 	struct ice_vsi *vsi;
2195 	int err;
2196 
2197 	/* No need to check for media if it's already present */
2198 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2199 		return;
2200 
2201 	vsi = ice_get_main_vsi(pf);
2202 	if (!vsi)
2203 		return;
2204 
2205 	/* Refresh link info and check if media is present */
2206 	pi = vsi->port_info;
2207 	err = ice_update_link_info(pi);
2208 	if (err)
2209 		return;
2210 
2211 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2212 
2213 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2214 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2215 			ice_init_phy_user_cfg(pi);
2216 
2217 		/* PHY settings are reset on media insertion, reconfigure
2218 		 * PHY to preserve settings.
2219 		 */
2220 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2221 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2222 			return;
2223 
2224 		err = ice_configure_phy(vsi);
2225 		if (!err)
2226 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2227 
2228 		/* A Link Status Event will be generated; the event handler
2229 		 * will complete bringing the interface up
2230 		 */
2231 	}
2232 }
2233 
2234 /**
2235  * ice_service_task - manage and run subtasks
2236  * @work: pointer to work_struct contained by the PF struct
2237  */
2238 static void ice_service_task(struct work_struct *work)
2239 {
2240 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2241 	unsigned long start_time = jiffies;
2242 
2243 	/* subtasks */
2244 
2245 	/* process reset requests first */
2246 	ice_reset_subtask(pf);
2247 
2248 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2249 	if (ice_is_reset_in_progress(pf->state) ||
2250 	    test_bit(ICE_SUSPENDED, pf->state) ||
2251 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2252 		ice_service_task_complete(pf);
2253 		return;
2254 	}
2255 
2256 	ice_clean_adminq_subtask(pf);
2257 	ice_check_media_subtask(pf);
2258 	ice_check_for_hang_subtask(pf);
2259 	ice_sync_fltr_subtask(pf);
2260 	ice_handle_mdd_event(pf);
2261 	ice_watchdog_subtask(pf);
2262 
2263 	if (ice_is_safe_mode(pf)) {
2264 		ice_service_task_complete(pf);
2265 		return;
2266 	}
2267 
2268 	ice_process_vflr_event(pf);
2269 	ice_clean_mailboxq_subtask(pf);
2270 	ice_clean_sbq_subtask(pf);
2271 	ice_sync_arfs_fltrs(pf);
2272 	ice_flush_fdir_ctx(pf);
2273 
2274 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2275 	ice_service_task_complete(pf);
2276 
2277 	/* If the tasks have taken longer than one service timer period
2278 	 * or there is more work to be done, reset the service timer to
2279 	 * schedule the service task now.
2280 	 */
2281 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2282 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2283 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2284 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2285 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2286 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2287 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2288 		mod_timer(&pf->serv_tmr, jiffies);
2289 }
2290 
2291 /**
2292  * ice_set_ctrlq_len - helper function to set controlq length
2293  * @hw: pointer to the HW instance
2294  */
2295 static void ice_set_ctrlq_len(struct ice_hw *hw)
2296 {
2297 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2298 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2299 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2300 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2301 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2302 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2303 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2304 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2305 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2306 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2307 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2308 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2309 }
2310 
2311 /**
2312  * ice_schedule_reset - schedule a reset
2313  * @pf: board private structure
2314  * @reset: reset being requested
2315  */
2316 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2317 {
2318 	struct device *dev = ice_pf_to_dev(pf);
2319 
2320 	/* bail out if earlier reset has failed */
2321 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2322 		dev_dbg(dev, "earlier reset has failed\n");
2323 		return -EIO;
2324 	}
2325 	/* bail if reset/recovery already in progress */
2326 	if (ice_is_reset_in_progress(pf->state)) {
2327 		dev_dbg(dev, "Reset already in progress\n");
2328 		return -EBUSY;
2329 	}
2330 
2331 	ice_unplug_aux_dev(pf);
2332 
2333 	switch (reset) {
2334 	case ICE_RESET_PFR:
2335 		set_bit(ICE_PFR_REQ, pf->state);
2336 		break;
2337 	case ICE_RESET_CORER:
2338 		set_bit(ICE_CORER_REQ, pf->state);
2339 		break;
2340 	case ICE_RESET_GLOBR:
2341 		set_bit(ICE_GLOBR_REQ, pf->state);
2342 		break;
2343 	default:
2344 		return -EINVAL;
2345 	}
2346 
2347 	ice_service_task_schedule(pf);
2348 	return 0;
2349 }
2350 
2351 /**
2352  * ice_irq_affinity_notify - Callback for affinity changes
2353  * @notify: context as to what irq was changed
2354  * @mask: the new affinity mask
2355  *
2356  * This is a callback function used by the irq_set_affinity_notifier function
2357  * so that we may register to receive changes to the irq affinity masks.
2358  */
2359 static void
2360 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2361 			const cpumask_t *mask)
2362 {
2363 	struct ice_q_vector *q_vector =
2364 		container_of(notify, struct ice_q_vector, affinity_notify);
2365 
2366 	cpumask_copy(&q_vector->affinity_mask, mask);
2367 }
2368 
2369 /**
2370  * ice_irq_affinity_release - Callback for affinity notifier release
2371  * @ref: internal core kernel usage
2372  *
2373  * This is a callback function used by the irq_set_affinity_notifier function
2374  * to inform the current notification subscriber that they will no longer
2375  * receive notifications.
2376  */
2377 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2378 
2379 /**
2380  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2381  * @vsi: the VSI being configured
2382  */
2383 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2384 {
2385 	struct ice_hw *hw = &vsi->back->hw;
2386 	int i;
2387 
2388 	ice_for_each_q_vector(vsi, i)
2389 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2390 
2391 	ice_flush(hw);
2392 	return 0;
2393 }
2394 
2395 /**
2396  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2397  * @vsi: the VSI being configured
2398  * @basename: name for the vector
2399  */
2400 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2401 {
2402 	int q_vectors = vsi->num_q_vectors;
2403 	struct ice_pf *pf = vsi->back;
2404 	int base = vsi->base_vector;
2405 	struct device *dev;
2406 	int rx_int_idx = 0;
2407 	int tx_int_idx = 0;
2408 	int vector, err;
2409 	int irq_num;
2410 
2411 	dev = ice_pf_to_dev(pf);
2412 	for (vector = 0; vector < q_vectors; vector++) {
2413 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2414 
2415 		irq_num = pf->msix_entries[base + vector].vector;
2416 
2417 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2418 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2419 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2420 			tx_int_idx++;
2421 		} else if (q_vector->rx.rx_ring) {
2422 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2423 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2424 		} else if (q_vector->tx.tx_ring) {
2425 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2426 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2427 		} else {
2428 			/* skip this unused q_vector */
2429 			continue;
2430 		}
2431 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2432 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2433 					       IRQF_SHARED, q_vector->name,
2434 					       q_vector);
2435 		else
2436 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2437 					       0, q_vector->name, q_vector);
2438 		if (err) {
2439 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2440 				   err);
2441 			goto free_q_irqs;
2442 		}
2443 
2444 		/* register for affinity change notifications */
2445 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2446 			struct irq_affinity_notify *affinity_notify;
2447 
2448 			affinity_notify = &q_vector->affinity_notify;
2449 			affinity_notify->notify = ice_irq_affinity_notify;
2450 			affinity_notify->release = ice_irq_affinity_release;
2451 			irq_set_affinity_notifier(irq_num, affinity_notify);
2452 		}
2453 
2454 		/* assign the mask for this irq */
2455 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2456 	}
2457 
2458 	vsi->irqs_ready = true;
2459 	return 0;
2460 
2461 free_q_irqs:
2462 	while (vector) {
2463 		vector--;
2464 		irq_num = pf->msix_entries[base + vector].vector;
2465 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2466 			irq_set_affinity_notifier(irq_num, NULL);
2467 		irq_set_affinity_hint(irq_num, NULL);
2468 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2469 	}
2470 	return err;
2471 }
2472 
2473 /**
2474  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2475  * @vsi: VSI to setup Tx rings used by XDP
2476  *
2477  * Return 0 on success and negative value on error
2478  */
2479 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2480 {
2481 	struct device *dev = ice_pf_to_dev(vsi->back);
2482 	struct ice_tx_desc *tx_desc;
2483 	int i, j;
2484 
2485 	ice_for_each_xdp_txq(vsi, i) {
2486 		u16 xdp_q_idx = vsi->alloc_txq + i;
2487 		struct ice_tx_ring *xdp_ring;
2488 
2489 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2490 
2491 		if (!xdp_ring)
2492 			goto free_xdp_rings;
2493 
2494 		xdp_ring->q_index = xdp_q_idx;
2495 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2496 		xdp_ring->vsi = vsi;
2497 		xdp_ring->netdev = NULL;
2498 		xdp_ring->next_dd = ICE_TX_THRESH - 1;
2499 		xdp_ring->next_rs = ICE_TX_THRESH - 1;
2500 		xdp_ring->dev = dev;
2501 		xdp_ring->count = vsi->num_tx_desc;
2502 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2503 		if (ice_setup_tx_ring(xdp_ring))
2504 			goto free_xdp_rings;
2505 		ice_set_ring_xdp(xdp_ring);
2506 		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
2507 		spin_lock_init(&xdp_ring->tx_lock);
2508 		for (j = 0; j < xdp_ring->count; j++) {
2509 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2510 			tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
2511 		}
2512 	}
2513 
2514 	ice_for_each_rxq(vsi, i) {
2515 		if (static_key_enabled(&ice_xdp_locking_key))
2516 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2517 		else
2518 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
2519 	}
2520 
2521 	return 0;
2522 
2523 free_xdp_rings:
2524 	for (; i >= 0; i--)
2525 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2526 			ice_free_tx_ring(vsi->xdp_rings[i]);
2527 	return -ENOMEM;
2528 }
2529 
2530 /**
2531  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2532  * @vsi: VSI to set the bpf prog on
2533  * @prog: the bpf prog pointer
2534  */
2535 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2536 {
2537 	struct bpf_prog *old_prog;
2538 	int i;
2539 
2540 	old_prog = xchg(&vsi->xdp_prog, prog);
2541 	if (old_prog)
2542 		bpf_prog_put(old_prog);
2543 
2544 	ice_for_each_rxq(vsi, i)
2545 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2546 }
2547 
2548 /**
2549  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2550  * @vsi: VSI to bring up Tx rings used by XDP
2551  * @prog: bpf program that will be assigned to VSI
2552  *
2553  * Return 0 on success and negative value on error
2554  */
2555 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2556 {
2557 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2558 	int xdp_rings_rem = vsi->num_xdp_txq;
2559 	struct ice_pf *pf = vsi->back;
2560 	struct ice_qs_cfg xdp_qs_cfg = {
2561 		.qs_mutex = &pf->avail_q_mutex,
2562 		.pf_map = pf->avail_txqs,
2563 		.pf_map_size = pf->max_pf_txqs,
2564 		.q_count = vsi->num_xdp_txq,
2565 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2566 		.vsi_map = vsi->txq_map,
2567 		.vsi_map_offset = vsi->alloc_txq,
2568 		.mapping_mode = ICE_VSI_MAP_CONTIG
2569 	};
2570 	struct device *dev;
2571 	int i, v_idx;
2572 	int status;
2573 
2574 	dev = ice_pf_to_dev(pf);
2575 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2576 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2577 	if (!vsi->xdp_rings)
2578 		return -ENOMEM;
2579 
2580 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2581 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2582 		goto err_map_xdp;
2583 
2584 	if (static_key_enabled(&ice_xdp_locking_key))
2585 		netdev_warn(vsi->netdev,
2586 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2587 
2588 	if (ice_xdp_alloc_setup_rings(vsi))
2589 		goto clear_xdp_rings;
2590 
2591 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2592 	ice_for_each_q_vector(vsi, v_idx) {
2593 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2594 		int xdp_rings_per_v, q_id, q_base;
2595 
2596 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2597 					       vsi->num_q_vectors - v_idx);
2598 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2599 
2600 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2601 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2602 
2603 			xdp_ring->q_vector = q_vector;
2604 			xdp_ring->next = q_vector->tx.tx_ring;
2605 			q_vector->tx.tx_ring = xdp_ring;
2606 		}
2607 		xdp_rings_rem -= xdp_rings_per_v;
2608 	}
2609 
2610 	/* omit the scheduler update if in reset path; XDP queues will be
2611 	 * taken into account at the end of ice_vsi_rebuild, where
2612 	 * ice_cfg_vsi_lan is being called
2613 	 */
2614 	if (ice_is_reset_in_progress(pf->state))
2615 		return 0;
2616 
2617 	/* tell the Tx scheduler that right now we have
2618 	 * additional queues
2619 	 */
2620 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2621 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2622 
2623 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2624 				 max_txqs);
2625 	if (status) {
2626 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2627 			status);
2628 		goto clear_xdp_rings;
2629 	}
2630 
2631 	/* assign the prog only when it's not already present on VSI;
2632 	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2633 	 * VSI rebuild that happens under ethtool -L can expose us to
2634 	 * the bpf_prog refcount issues as we would be swapping same
2635 	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2636 	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2637 	 * this is not harmful as dev_xdp_install bumps the refcount
2638 	 * before calling the op exposed by the driver;
2639 	 */
2640 	if (!ice_is_xdp_ena_vsi(vsi))
2641 		ice_vsi_assign_bpf_prog(vsi, prog);
2642 
2643 	return 0;
2644 clear_xdp_rings:
2645 	ice_for_each_xdp_txq(vsi, i)
2646 		if (vsi->xdp_rings[i]) {
2647 			kfree_rcu(vsi->xdp_rings[i], rcu);
2648 			vsi->xdp_rings[i] = NULL;
2649 		}
2650 
2651 err_map_xdp:
2652 	mutex_lock(&pf->avail_q_mutex);
2653 	ice_for_each_xdp_txq(vsi, i) {
2654 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2655 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2656 	}
2657 	mutex_unlock(&pf->avail_q_mutex);
2658 
2659 	devm_kfree(dev, vsi->xdp_rings);
2660 	return -ENOMEM;
2661 }
2662 
2663 /**
2664  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2665  * @vsi: VSI to remove XDP rings
2666  *
2667  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2668  * resources
2669  */
2670 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2671 {
2672 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2673 	struct ice_pf *pf = vsi->back;
2674 	int i, v_idx;
2675 
2676 	/* q_vectors are freed in reset path so there's no point in detaching
2677 	 * rings; in case of rebuild being triggered not from reset bits
2678 	 * in pf->state won't be set, so additionally check first q_vector
2679 	 * against NULL
2680 	 */
2681 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2682 		goto free_qmap;
2683 
2684 	ice_for_each_q_vector(vsi, v_idx) {
2685 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2686 		struct ice_tx_ring *ring;
2687 
2688 		ice_for_each_tx_ring(ring, q_vector->tx)
2689 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2690 				break;
2691 
2692 		/* restore the value of last node prior to XDP setup */
2693 		q_vector->tx.tx_ring = ring;
2694 	}
2695 
2696 free_qmap:
2697 	mutex_lock(&pf->avail_q_mutex);
2698 	ice_for_each_xdp_txq(vsi, i) {
2699 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2700 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2701 	}
2702 	mutex_unlock(&pf->avail_q_mutex);
2703 
2704 	ice_for_each_xdp_txq(vsi, i)
2705 		if (vsi->xdp_rings[i]) {
2706 			if (vsi->xdp_rings[i]->desc)
2707 				ice_free_tx_ring(vsi->xdp_rings[i]);
2708 			kfree_rcu(vsi->xdp_rings[i], rcu);
2709 			vsi->xdp_rings[i] = NULL;
2710 		}
2711 
2712 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2713 	vsi->xdp_rings = NULL;
2714 
2715 	if (static_key_enabled(&ice_xdp_locking_key))
2716 		static_branch_dec(&ice_xdp_locking_key);
2717 
2718 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2719 		return 0;
2720 
2721 	ice_vsi_assign_bpf_prog(vsi, NULL);
2722 
2723 	/* notify Tx scheduler that we destroyed XDP queues and bring
2724 	 * back the old number of child nodes
2725 	 */
2726 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2727 		max_txqs[i] = vsi->num_txq;
2728 
2729 	/* change number of XDP Tx queues to 0 */
2730 	vsi->num_xdp_txq = 0;
2731 
2732 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2733 			       max_txqs);
2734 }
2735 
2736 /**
2737  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2738  * @vsi: VSI to schedule napi on
2739  */
2740 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2741 {
2742 	int i;
2743 
2744 	ice_for_each_rxq(vsi, i) {
2745 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2746 
2747 		if (rx_ring->xsk_pool)
2748 			napi_schedule(&rx_ring->q_vector->napi);
2749 	}
2750 }
2751 
2752 /**
2753  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2754  * @vsi: VSI to determine the count of XDP Tx qs
2755  *
2756  * returns 0 if Tx qs count is higher than at least half of CPU count,
2757  * -ENOMEM otherwise
2758  */
2759 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2760 {
2761 	u16 avail = ice_get_avail_txq_count(vsi->back);
2762 	u16 cpus = num_possible_cpus();
2763 
2764 	if (avail < cpus / 2)
2765 		return -ENOMEM;
2766 
2767 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2768 
2769 	if (vsi->num_xdp_txq < cpus)
2770 		static_branch_inc(&ice_xdp_locking_key);
2771 
2772 	return 0;
2773 }
2774 
2775 /**
2776  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2777  * @vsi: VSI to setup XDP for
2778  * @prog: XDP program
2779  * @extack: netlink extended ack
2780  */
2781 static int
2782 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2783 		   struct netlink_ext_ack *extack)
2784 {
2785 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2786 	bool if_running = netif_running(vsi->netdev);
2787 	int ret = 0, xdp_ring_err = 0;
2788 
2789 	if (frame_size > vsi->rx_buf_len) {
2790 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2791 		return -EOPNOTSUPP;
2792 	}
2793 
2794 	/* need to stop netdev while setting up the program for Rx rings */
2795 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2796 		ret = ice_down(vsi);
2797 		if (ret) {
2798 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2799 			return ret;
2800 		}
2801 	}
2802 
2803 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2804 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2805 		if (xdp_ring_err) {
2806 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2807 		} else {
2808 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2809 			if (xdp_ring_err)
2810 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2811 		}
2812 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2813 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2814 		if (xdp_ring_err)
2815 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2816 	} else {
2817 		/* safe to call even when prog == vsi->xdp_prog as
2818 		 * dev_xdp_install in net/core/dev.c incremented prog's
2819 		 * refcount so corresponding bpf_prog_put won't cause
2820 		 * underflow
2821 		 */
2822 		ice_vsi_assign_bpf_prog(vsi, prog);
2823 	}
2824 
2825 	if (if_running)
2826 		ret = ice_up(vsi);
2827 
2828 	if (!ret && prog)
2829 		ice_vsi_rx_napi_schedule(vsi);
2830 
2831 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2832 }
2833 
2834 /**
2835  * ice_xdp_safe_mode - XDP handler for safe mode
2836  * @dev: netdevice
2837  * @xdp: XDP command
2838  */
2839 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2840 			     struct netdev_bpf *xdp)
2841 {
2842 	NL_SET_ERR_MSG_MOD(xdp->extack,
2843 			   "Please provide working DDP firmware package in order to use XDP\n"
2844 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2845 	return -EOPNOTSUPP;
2846 }
2847 
2848 /**
2849  * ice_xdp - implements XDP handler
2850  * @dev: netdevice
2851  * @xdp: XDP command
2852  */
2853 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2854 {
2855 	struct ice_netdev_priv *np = netdev_priv(dev);
2856 	struct ice_vsi *vsi = np->vsi;
2857 
2858 	if (vsi->type != ICE_VSI_PF) {
2859 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2860 		return -EINVAL;
2861 	}
2862 
2863 	switch (xdp->command) {
2864 	case XDP_SETUP_PROG:
2865 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2866 	case XDP_SETUP_XSK_POOL:
2867 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2868 					  xdp->xsk.queue_id);
2869 	default:
2870 		return -EINVAL;
2871 	}
2872 }
2873 
2874 /**
2875  * ice_ena_misc_vector - enable the non-queue interrupts
2876  * @pf: board private structure
2877  */
2878 static void ice_ena_misc_vector(struct ice_pf *pf)
2879 {
2880 	struct ice_hw *hw = &pf->hw;
2881 	u32 val;
2882 
2883 	/* Disable anti-spoof detection interrupt to prevent spurious event
2884 	 * interrupts during a function reset. Anti-spoof functionally is
2885 	 * still supported.
2886 	 */
2887 	val = rd32(hw, GL_MDCK_TX_TDPU);
2888 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2889 	wr32(hw, GL_MDCK_TX_TDPU, val);
2890 
2891 	/* clear things first */
2892 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2893 	rd32(hw, PFINT_OICR);		/* read to clear */
2894 
2895 	val = (PFINT_OICR_ECC_ERR_M |
2896 	       PFINT_OICR_MAL_DETECT_M |
2897 	       PFINT_OICR_GRST_M |
2898 	       PFINT_OICR_PCI_EXCEPTION_M |
2899 	       PFINT_OICR_VFLR_M |
2900 	       PFINT_OICR_HMC_ERR_M |
2901 	       PFINT_OICR_PE_PUSH_M |
2902 	       PFINT_OICR_PE_CRITERR_M);
2903 
2904 	wr32(hw, PFINT_OICR_ENA, val);
2905 
2906 	/* SW_ITR_IDX = 0, but don't change INTENA */
2907 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2908 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2909 }
2910 
2911 /**
2912  * ice_misc_intr - misc interrupt handler
2913  * @irq: interrupt number
2914  * @data: pointer to a q_vector
2915  */
2916 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2917 {
2918 	struct ice_pf *pf = (struct ice_pf *)data;
2919 	struct ice_hw *hw = &pf->hw;
2920 	irqreturn_t ret = IRQ_NONE;
2921 	struct device *dev;
2922 	u32 oicr, ena_mask;
2923 
2924 	dev = ice_pf_to_dev(pf);
2925 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2926 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2927 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2928 
2929 	oicr = rd32(hw, PFINT_OICR);
2930 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2931 
2932 	if (oicr & PFINT_OICR_SWINT_M) {
2933 		ena_mask &= ~PFINT_OICR_SWINT_M;
2934 		pf->sw_int_count++;
2935 	}
2936 
2937 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2938 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2939 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2940 	}
2941 	if (oicr & PFINT_OICR_VFLR_M) {
2942 		/* disable any further VFLR event notifications */
2943 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2944 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2945 
2946 			reg &= ~PFINT_OICR_VFLR_M;
2947 			wr32(hw, PFINT_OICR_ENA, reg);
2948 		} else {
2949 			ena_mask &= ~PFINT_OICR_VFLR_M;
2950 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2951 		}
2952 	}
2953 
2954 	if (oicr & PFINT_OICR_GRST_M) {
2955 		u32 reset;
2956 
2957 		/* we have a reset warning */
2958 		ena_mask &= ~PFINT_OICR_GRST_M;
2959 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2960 			GLGEN_RSTAT_RESET_TYPE_S;
2961 
2962 		if (reset == ICE_RESET_CORER)
2963 			pf->corer_count++;
2964 		else if (reset == ICE_RESET_GLOBR)
2965 			pf->globr_count++;
2966 		else if (reset == ICE_RESET_EMPR)
2967 			pf->empr_count++;
2968 		else
2969 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2970 
2971 		/* If a reset cycle isn't already in progress, we set a bit in
2972 		 * pf->state so that the service task can start a reset/rebuild.
2973 		 */
2974 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2975 			if (reset == ICE_RESET_CORER)
2976 				set_bit(ICE_CORER_RECV, pf->state);
2977 			else if (reset == ICE_RESET_GLOBR)
2978 				set_bit(ICE_GLOBR_RECV, pf->state);
2979 			else
2980 				set_bit(ICE_EMPR_RECV, pf->state);
2981 
2982 			/* There are couple of different bits at play here.
2983 			 * hw->reset_ongoing indicates whether the hardware is
2984 			 * in reset. This is set to true when a reset interrupt
2985 			 * is received and set back to false after the driver
2986 			 * has determined that the hardware is out of reset.
2987 			 *
2988 			 * ICE_RESET_OICR_RECV in pf->state indicates
2989 			 * that a post reset rebuild is required before the
2990 			 * driver is operational again. This is set above.
2991 			 *
2992 			 * As this is the start of the reset/rebuild cycle, set
2993 			 * both to indicate that.
2994 			 */
2995 			hw->reset_ongoing = true;
2996 		}
2997 	}
2998 
2999 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3000 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3001 		ice_ptp_process_ts(pf);
3002 	}
3003 
3004 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3005 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3006 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3007 
3008 		/* Save EVENTs from GTSYN register */
3009 		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
3010 						     GLTSYN_STAT_EVENT1_M |
3011 						     GLTSYN_STAT_EVENT2_M);
3012 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3013 		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
3014 	}
3015 
3016 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3017 	if (oicr & ICE_AUX_CRIT_ERR) {
3018 		struct iidc_event *event;
3019 
3020 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3021 		event = kzalloc(sizeof(*event), GFP_KERNEL);
3022 		if (event) {
3023 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
3024 			/* report the entire OICR value to AUX driver */
3025 			event->reg = oicr;
3026 			ice_send_event_to_aux(pf, event);
3027 			kfree(event);
3028 		}
3029 	}
3030 
3031 	/* Report any remaining unexpected interrupts */
3032 	oicr &= ena_mask;
3033 	if (oicr) {
3034 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3035 		/* If a critical error is pending there is no choice but to
3036 		 * reset the device.
3037 		 */
3038 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3039 			    PFINT_OICR_ECC_ERR_M)) {
3040 			set_bit(ICE_PFR_REQ, pf->state);
3041 			ice_service_task_schedule(pf);
3042 		}
3043 	}
3044 	ret = IRQ_HANDLED;
3045 
3046 	ice_service_task_schedule(pf);
3047 	ice_irq_dynamic_ena(hw, NULL, NULL);
3048 
3049 	return ret;
3050 }
3051 
3052 /**
3053  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3054  * @hw: pointer to HW structure
3055  */
3056 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3057 {
3058 	/* disable Admin queue Interrupt causes */
3059 	wr32(hw, PFINT_FW_CTL,
3060 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3061 
3062 	/* disable Mailbox queue Interrupt causes */
3063 	wr32(hw, PFINT_MBX_CTL,
3064 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3065 
3066 	wr32(hw, PFINT_SB_CTL,
3067 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3068 
3069 	/* disable Control queue Interrupt causes */
3070 	wr32(hw, PFINT_OICR_CTL,
3071 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3072 
3073 	ice_flush(hw);
3074 }
3075 
3076 /**
3077  * ice_free_irq_msix_misc - Unroll misc vector setup
3078  * @pf: board private structure
3079  */
3080 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3081 {
3082 	struct ice_hw *hw = &pf->hw;
3083 
3084 	ice_dis_ctrlq_interrupts(hw);
3085 
3086 	/* disable OICR interrupt */
3087 	wr32(hw, PFINT_OICR_ENA, 0);
3088 	ice_flush(hw);
3089 
3090 	if (pf->msix_entries) {
3091 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
3092 		devm_free_irq(ice_pf_to_dev(pf),
3093 			      pf->msix_entries[pf->oicr_idx].vector, pf);
3094 	}
3095 
3096 	pf->num_avail_sw_msix += 1;
3097 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
3098 }
3099 
3100 /**
3101  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3102  * @hw: pointer to HW structure
3103  * @reg_idx: HW vector index to associate the control queue interrupts with
3104  */
3105 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3106 {
3107 	u32 val;
3108 
3109 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3110 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3111 	wr32(hw, PFINT_OICR_CTL, val);
3112 
3113 	/* enable Admin queue Interrupt causes */
3114 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3115 	       PFINT_FW_CTL_CAUSE_ENA_M);
3116 	wr32(hw, PFINT_FW_CTL, val);
3117 
3118 	/* enable Mailbox queue Interrupt causes */
3119 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3120 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3121 	wr32(hw, PFINT_MBX_CTL, val);
3122 
3123 	/* This enables Sideband queue Interrupt causes */
3124 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3125 	       PFINT_SB_CTL_CAUSE_ENA_M);
3126 	wr32(hw, PFINT_SB_CTL, val);
3127 
3128 	ice_flush(hw);
3129 }
3130 
3131 /**
3132  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3133  * @pf: board private structure
3134  *
3135  * This sets up the handler for MSIX 0, which is used to manage the
3136  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3137  * when in MSI or Legacy interrupt mode.
3138  */
3139 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3140 {
3141 	struct device *dev = ice_pf_to_dev(pf);
3142 	struct ice_hw *hw = &pf->hw;
3143 	int oicr_idx, err = 0;
3144 
3145 	if (!pf->int_name[0])
3146 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3147 			 dev_driver_string(dev), dev_name(dev));
3148 
3149 	/* Do not request IRQ but do enable OICR interrupt since settings are
3150 	 * lost during reset. Note that this function is called only during
3151 	 * rebuild path and not while reset is in progress.
3152 	 */
3153 	if (ice_is_reset_in_progress(pf->state))
3154 		goto skip_req_irq;
3155 
3156 	/* reserve one vector in irq_tracker for misc interrupts */
3157 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3158 	if (oicr_idx < 0)
3159 		return oicr_idx;
3160 
3161 	pf->num_avail_sw_msix -= 1;
3162 	pf->oicr_idx = (u16)oicr_idx;
3163 
3164 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
3165 			       ice_misc_intr, 0, pf->int_name, pf);
3166 	if (err) {
3167 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3168 			pf->int_name, err);
3169 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3170 		pf->num_avail_sw_msix += 1;
3171 		return err;
3172 	}
3173 
3174 skip_req_irq:
3175 	ice_ena_misc_vector(pf);
3176 
3177 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3178 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3179 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3180 
3181 	ice_flush(hw);
3182 	ice_irq_dynamic_ena(hw, NULL, NULL);
3183 
3184 	return 0;
3185 }
3186 
3187 /**
3188  * ice_napi_add - register NAPI handler for the VSI
3189  * @vsi: VSI for which NAPI handler is to be registered
3190  *
3191  * This function is only called in the driver's load path. Registering the NAPI
3192  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3193  * reset/rebuild, etc.)
3194  */
3195 static void ice_napi_add(struct ice_vsi *vsi)
3196 {
3197 	int v_idx;
3198 
3199 	if (!vsi->netdev)
3200 		return;
3201 
3202 	ice_for_each_q_vector(vsi, v_idx)
3203 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3204 			       ice_napi_poll, NAPI_POLL_WEIGHT);
3205 }
3206 
3207 /**
3208  * ice_set_ops - set netdev and ethtools ops for the given netdev
3209  * @netdev: netdev instance
3210  */
3211 static void ice_set_ops(struct net_device *netdev)
3212 {
3213 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3214 
3215 	if (ice_is_safe_mode(pf)) {
3216 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3217 		ice_set_ethtool_safe_mode_ops(netdev);
3218 		return;
3219 	}
3220 
3221 	netdev->netdev_ops = &ice_netdev_ops;
3222 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3223 	ice_set_ethtool_ops(netdev);
3224 }
3225 
3226 /**
3227  * ice_set_netdev_features - set features for the given netdev
3228  * @netdev: netdev instance
3229  */
3230 static void ice_set_netdev_features(struct net_device *netdev)
3231 {
3232 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3233 	netdev_features_t csumo_features;
3234 	netdev_features_t vlano_features;
3235 	netdev_features_t dflt_features;
3236 	netdev_features_t tso_features;
3237 
3238 	if (ice_is_safe_mode(pf)) {
3239 		/* safe mode */
3240 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3241 		netdev->hw_features = netdev->features;
3242 		return;
3243 	}
3244 
3245 	dflt_features = NETIF_F_SG	|
3246 			NETIF_F_HIGHDMA	|
3247 			NETIF_F_NTUPLE	|
3248 			NETIF_F_RXHASH;
3249 
3250 	csumo_features = NETIF_F_RXCSUM	  |
3251 			 NETIF_F_IP_CSUM  |
3252 			 NETIF_F_SCTP_CRC |
3253 			 NETIF_F_IPV6_CSUM;
3254 
3255 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3256 			 NETIF_F_HW_VLAN_CTAG_TX     |
3257 			 NETIF_F_HW_VLAN_CTAG_RX;
3258 
3259 	tso_features = NETIF_F_TSO			|
3260 		       NETIF_F_TSO_ECN			|
3261 		       NETIF_F_TSO6			|
3262 		       NETIF_F_GSO_GRE			|
3263 		       NETIF_F_GSO_UDP_TUNNEL		|
3264 		       NETIF_F_GSO_GRE_CSUM		|
3265 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3266 		       NETIF_F_GSO_PARTIAL		|
3267 		       NETIF_F_GSO_IPXIP4		|
3268 		       NETIF_F_GSO_IPXIP6		|
3269 		       NETIF_F_GSO_UDP_L4;
3270 
3271 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3272 					NETIF_F_GSO_GRE_CSUM;
3273 	/* set features that user can change */
3274 	netdev->hw_features = dflt_features | csumo_features |
3275 			      vlano_features | tso_features;
3276 
3277 	/* add support for HW_CSUM on packets with MPLS header */
3278 	netdev->mpls_features =  NETIF_F_HW_CSUM;
3279 
3280 	/* enable features */
3281 	netdev->features |= netdev->hw_features;
3282 
3283 	netdev->hw_features |= NETIF_F_HW_TC;
3284 
3285 	/* encap and VLAN devices inherit default, csumo and tso features */
3286 	netdev->hw_enc_features |= dflt_features | csumo_features |
3287 				   tso_features;
3288 	netdev->vlan_features |= dflt_features | csumo_features |
3289 				 tso_features;
3290 }
3291 
3292 /**
3293  * ice_cfg_netdev - Allocate, configure and register a netdev
3294  * @vsi: the VSI associated with the new netdev
3295  *
3296  * Returns 0 on success, negative value on failure
3297  */
3298 static int ice_cfg_netdev(struct ice_vsi *vsi)
3299 {
3300 	struct ice_netdev_priv *np;
3301 	struct net_device *netdev;
3302 	u8 mac_addr[ETH_ALEN];
3303 
3304 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3305 				    vsi->alloc_rxq);
3306 	if (!netdev)
3307 		return -ENOMEM;
3308 
3309 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3310 	vsi->netdev = netdev;
3311 	np = netdev_priv(netdev);
3312 	np->vsi = vsi;
3313 
3314 	ice_set_netdev_features(netdev);
3315 
3316 	ice_set_ops(netdev);
3317 
3318 	if (vsi->type == ICE_VSI_PF) {
3319 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3320 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3321 		eth_hw_addr_set(netdev, mac_addr);
3322 		ether_addr_copy(netdev->perm_addr, mac_addr);
3323 	}
3324 
3325 	netdev->priv_flags |= IFF_UNICAST_FLT;
3326 
3327 	/* Setup netdev TC information */
3328 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3329 
3330 	/* setup watchdog timeout value to be 5 second */
3331 	netdev->watchdog_timeo = 5 * HZ;
3332 
3333 	netdev->min_mtu = ETH_MIN_MTU;
3334 	netdev->max_mtu = ICE_MAX_MTU;
3335 
3336 	return 0;
3337 }
3338 
3339 /**
3340  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3341  * @lut: Lookup table
3342  * @rss_table_size: Lookup table size
3343  * @rss_size: Range of queue number for hashing
3344  */
3345 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3346 {
3347 	u16 i;
3348 
3349 	for (i = 0; i < rss_table_size; i++)
3350 		lut[i] = i % rss_size;
3351 }
3352 
3353 /**
3354  * ice_pf_vsi_setup - Set up a PF VSI
3355  * @pf: board private structure
3356  * @pi: pointer to the port_info instance
3357  *
3358  * Returns pointer to the successfully allocated VSI software struct
3359  * on success, otherwise returns NULL on failure.
3360  */
3361 static struct ice_vsi *
3362 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3363 {
3364 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
3365 }
3366 
3367 static struct ice_vsi *
3368 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3369 		   struct ice_channel *ch)
3370 {
3371 	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
3372 }
3373 
3374 /**
3375  * ice_ctrl_vsi_setup - Set up a control VSI
3376  * @pf: board private structure
3377  * @pi: pointer to the port_info instance
3378  *
3379  * Returns pointer to the successfully allocated VSI software struct
3380  * on success, otherwise returns NULL on failure.
3381  */
3382 static struct ice_vsi *
3383 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3384 {
3385 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
3386 }
3387 
3388 /**
3389  * ice_lb_vsi_setup - Set up a loopback VSI
3390  * @pf: board private structure
3391  * @pi: pointer to the port_info instance
3392  *
3393  * Returns pointer to the successfully allocated VSI software struct
3394  * on success, otherwise returns NULL on failure.
3395  */
3396 struct ice_vsi *
3397 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3398 {
3399 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
3400 }
3401 
3402 /**
3403  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3404  * @netdev: network interface to be adjusted
3405  * @proto: unused protocol
3406  * @vid: VLAN ID to be added
3407  *
3408  * net_device_ops implementation for adding VLAN IDs
3409  */
3410 static int
3411 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3412 		    u16 vid)
3413 {
3414 	struct ice_netdev_priv *np = netdev_priv(netdev);
3415 	struct ice_vsi *vsi = np->vsi;
3416 	int ret;
3417 
3418 	/* VLAN 0 is added by default during load/reset */
3419 	if (!vid)
3420 		return 0;
3421 
3422 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3423 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3424 		ret = ice_cfg_vlan_pruning(vsi, true);
3425 		if (ret)
3426 			return ret;
3427 	}
3428 
3429 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3430 	 * packets aren't pruned by the device's internal switch on Rx
3431 	 */
3432 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3433 	if (!ret)
3434 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3435 
3436 	return ret;
3437 }
3438 
3439 /**
3440  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3441  * @netdev: network interface to be adjusted
3442  * @proto: unused protocol
3443  * @vid: VLAN ID to be removed
3444  *
3445  * net_device_ops implementation for removing VLAN IDs
3446  */
3447 static int
3448 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3449 		     u16 vid)
3450 {
3451 	struct ice_netdev_priv *np = netdev_priv(netdev);
3452 	struct ice_vsi *vsi = np->vsi;
3453 	int ret;
3454 
3455 	/* don't allow removal of VLAN 0 */
3456 	if (!vid)
3457 		return 0;
3458 
3459 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3460 	 * information
3461 	 */
3462 	ret = ice_vsi_kill_vlan(vsi, vid);
3463 	if (ret)
3464 		return ret;
3465 
3466 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3467 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3468 		ret = ice_cfg_vlan_pruning(vsi, false);
3469 
3470 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3471 	return ret;
3472 }
3473 
3474 /**
3475  * ice_rep_indr_tc_block_unbind
3476  * @cb_priv: indirection block private data
3477  */
3478 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3479 {
3480 	struct ice_indr_block_priv *indr_priv = cb_priv;
3481 
3482 	list_del(&indr_priv->list);
3483 	kfree(indr_priv);
3484 }
3485 
3486 /**
3487  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3488  * @vsi: VSI struct which has the netdev
3489  */
3490 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3491 {
3492 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3493 
3494 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3495 				 ice_rep_indr_tc_block_unbind);
3496 }
3497 
3498 /**
3499  * ice_tc_indir_block_remove - clean indirect TC block notifications
3500  * @pf: PF structure
3501  */
3502 static void ice_tc_indir_block_remove(struct ice_pf *pf)
3503 {
3504 	struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3505 
3506 	if (!pf_vsi)
3507 		return;
3508 
3509 	ice_tc_indir_block_unregister(pf_vsi);
3510 }
3511 
3512 /**
3513  * ice_tc_indir_block_register - Register TC indirect block notifications
3514  * @vsi: VSI struct which has the netdev
3515  *
3516  * Returns 0 on success, negative value on failure
3517  */
3518 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3519 {
3520 	struct ice_netdev_priv *np;
3521 
3522 	if (!vsi || !vsi->netdev)
3523 		return -EINVAL;
3524 
3525 	np = netdev_priv(vsi->netdev);
3526 
3527 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3528 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3529 }
3530 
3531 /**
3532  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3533  * @pf: board private structure
3534  *
3535  * Returns 0 on success, negative value on failure
3536  */
3537 static int ice_setup_pf_sw(struct ice_pf *pf)
3538 {
3539 	struct device *dev = ice_pf_to_dev(pf);
3540 	struct ice_vsi *vsi;
3541 	int status;
3542 
3543 	if (ice_is_reset_in_progress(pf->state))
3544 		return -EBUSY;
3545 
3546 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3547 	if (!vsi)
3548 		return -ENOMEM;
3549 
3550 	/* init channel list */
3551 	INIT_LIST_HEAD(&vsi->ch_list);
3552 
3553 	status = ice_cfg_netdev(vsi);
3554 	if (status)
3555 		goto unroll_vsi_setup;
3556 	/* netdev has to be configured before setting frame size */
3557 	ice_vsi_cfg_frame_size(vsi);
3558 
3559 	/* init indirect block notifications */
3560 	status = ice_tc_indir_block_register(vsi);
3561 	if (status) {
3562 		dev_err(dev, "Failed to register netdev notifier\n");
3563 		goto unroll_cfg_netdev;
3564 	}
3565 
3566 	/* Setup DCB netlink interface */
3567 	ice_dcbnl_setup(vsi);
3568 
3569 	/* registering the NAPI handler requires both the queues and
3570 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3571 	 * and ice_cfg_netdev() respectively
3572 	 */
3573 	ice_napi_add(vsi);
3574 
3575 	status = ice_set_cpu_rx_rmap(vsi);
3576 	if (status) {
3577 		dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
3578 			vsi->vsi_num, status);
3579 		goto unroll_napi_add;
3580 	}
3581 	status = ice_init_mac_fltr(pf);
3582 	if (status)
3583 		goto free_cpu_rx_map;
3584 
3585 	return 0;
3586 
3587 free_cpu_rx_map:
3588 	ice_free_cpu_rx_rmap(vsi);
3589 unroll_napi_add:
3590 	ice_tc_indir_block_unregister(vsi);
3591 unroll_cfg_netdev:
3592 	if (vsi) {
3593 		ice_napi_del(vsi);
3594 		if (vsi->netdev) {
3595 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3596 			free_netdev(vsi->netdev);
3597 			vsi->netdev = NULL;
3598 		}
3599 	}
3600 
3601 unroll_vsi_setup:
3602 	ice_vsi_release(vsi);
3603 	return status;
3604 }
3605 
3606 /**
3607  * ice_get_avail_q_count - Get count of queues in use
3608  * @pf_qmap: bitmap to get queue use count from
3609  * @lock: pointer to a mutex that protects access to pf_qmap
3610  * @size: size of the bitmap
3611  */
3612 static u16
3613 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3614 {
3615 	unsigned long bit;
3616 	u16 count = 0;
3617 
3618 	mutex_lock(lock);
3619 	for_each_clear_bit(bit, pf_qmap, size)
3620 		count++;
3621 	mutex_unlock(lock);
3622 
3623 	return count;
3624 }
3625 
3626 /**
3627  * ice_get_avail_txq_count - Get count of Tx queues in use
3628  * @pf: pointer to an ice_pf instance
3629  */
3630 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3631 {
3632 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3633 				     pf->max_pf_txqs);
3634 }
3635 
3636 /**
3637  * ice_get_avail_rxq_count - Get count of Rx queues in use
3638  * @pf: pointer to an ice_pf instance
3639  */
3640 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3641 {
3642 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3643 				     pf->max_pf_rxqs);
3644 }
3645 
3646 /**
3647  * ice_deinit_pf - Unrolls initialziations done by ice_init_pf
3648  * @pf: board private structure to initialize
3649  */
3650 static void ice_deinit_pf(struct ice_pf *pf)
3651 {
3652 	ice_service_task_stop(pf);
3653 	mutex_destroy(&pf->sw_mutex);
3654 	mutex_destroy(&pf->tc_mutex);
3655 	mutex_destroy(&pf->avail_q_mutex);
3656 
3657 	if (pf->avail_txqs) {
3658 		bitmap_free(pf->avail_txqs);
3659 		pf->avail_txqs = NULL;
3660 	}
3661 
3662 	if (pf->avail_rxqs) {
3663 		bitmap_free(pf->avail_rxqs);
3664 		pf->avail_rxqs = NULL;
3665 	}
3666 
3667 	if (pf->ptp.clock)
3668 		ptp_clock_unregister(pf->ptp.clock);
3669 }
3670 
3671 /**
3672  * ice_set_pf_caps - set PFs capability flags
3673  * @pf: pointer to the PF instance
3674  */
3675 static void ice_set_pf_caps(struct ice_pf *pf)
3676 {
3677 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3678 
3679 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3680 	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3681 	if (func_caps->common_cap.rdma) {
3682 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3683 		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3684 	}
3685 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3686 	if (func_caps->common_cap.dcb)
3687 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3688 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3689 	if (func_caps->common_cap.sr_iov_1_1) {
3690 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3691 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3692 					      ICE_MAX_VF_COUNT);
3693 	}
3694 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3695 	if (func_caps->common_cap.rss_table_size)
3696 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3697 
3698 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3699 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3700 		u16 unused;
3701 
3702 		/* ctrl_vsi_idx will be set to a valid value when flow director
3703 		 * is setup by ice_init_fdir
3704 		 */
3705 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3706 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3707 		/* force guaranteed filter pool for PF */
3708 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3709 				       func_caps->fd_fltr_guar);
3710 		/* force shared filter pool for PF */
3711 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3712 				       func_caps->fd_fltr_best_effort);
3713 	}
3714 
3715 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3716 	if (func_caps->common_cap.ieee_1588)
3717 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3718 
3719 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3720 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3721 }
3722 
3723 /**
3724  * ice_init_pf - Initialize general software structures (struct ice_pf)
3725  * @pf: board private structure to initialize
3726  */
3727 static int ice_init_pf(struct ice_pf *pf)
3728 {
3729 	ice_set_pf_caps(pf);
3730 
3731 	mutex_init(&pf->sw_mutex);
3732 	mutex_init(&pf->tc_mutex);
3733 
3734 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3735 	spin_lock_init(&pf->aq_wait_lock);
3736 	init_waitqueue_head(&pf->aq_wait_queue);
3737 
3738 	init_waitqueue_head(&pf->reset_wait_queue);
3739 
3740 	/* setup service timer and periodic service task */
3741 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3742 	pf->serv_tmr_period = HZ;
3743 	INIT_WORK(&pf->serv_task, ice_service_task);
3744 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3745 
3746 	mutex_init(&pf->avail_q_mutex);
3747 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3748 	if (!pf->avail_txqs)
3749 		return -ENOMEM;
3750 
3751 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3752 	if (!pf->avail_rxqs) {
3753 		devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
3754 		pf->avail_txqs = NULL;
3755 		return -ENOMEM;
3756 	}
3757 
3758 	return 0;
3759 }
3760 
3761 /**
3762  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3763  * @pf: board private structure
3764  *
3765  * compute the number of MSIX vectors required (v_budget) and request from
3766  * the OS. Return the number of vectors reserved or negative on failure
3767  */
3768 static int ice_ena_msix_range(struct ice_pf *pf)
3769 {
3770 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3771 	struct device *dev = ice_pf_to_dev(pf);
3772 	int needed, err, i;
3773 
3774 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3775 	num_cpus = num_online_cpus();
3776 
3777 	/* reserve for LAN miscellaneous handler */
3778 	needed = ICE_MIN_LAN_OICR_MSIX;
3779 	if (v_left < needed)
3780 		goto no_hw_vecs_left_err;
3781 	v_budget += needed;
3782 	v_left -= needed;
3783 
3784 	/* reserve for flow director */
3785 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3786 		needed = ICE_FDIR_MSIX;
3787 		if (v_left < needed)
3788 			goto no_hw_vecs_left_err;
3789 		v_budget += needed;
3790 		v_left -= needed;
3791 	}
3792 
3793 	/* reserve for switchdev */
3794 	needed = ICE_ESWITCH_MSIX;
3795 	if (v_left < needed)
3796 		goto no_hw_vecs_left_err;
3797 	v_budget += needed;
3798 	v_left -= needed;
3799 
3800 	/* total used for non-traffic vectors */
3801 	v_other = v_budget;
3802 
3803 	/* reserve vectors for LAN traffic */
3804 	needed = num_cpus;
3805 	if (v_left < needed)
3806 		goto no_hw_vecs_left_err;
3807 	pf->num_lan_msix = needed;
3808 	v_budget += needed;
3809 	v_left -= needed;
3810 
3811 	/* reserve vectors for RDMA auxiliary driver */
3812 	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3813 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3814 		if (v_left < needed)
3815 			goto no_hw_vecs_left_err;
3816 		pf->num_rdma_msix = needed;
3817 		v_budget += needed;
3818 		v_left -= needed;
3819 	}
3820 
3821 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3822 					sizeof(*pf->msix_entries), GFP_KERNEL);
3823 	if (!pf->msix_entries) {
3824 		err = -ENOMEM;
3825 		goto exit_err;
3826 	}
3827 
3828 	for (i = 0; i < v_budget; i++)
3829 		pf->msix_entries[i].entry = i;
3830 
3831 	/* actually reserve the vectors */
3832 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3833 					 ICE_MIN_MSIX, v_budget);
3834 	if (v_actual < 0) {
3835 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3836 		err = v_actual;
3837 		goto msix_err;
3838 	}
3839 
3840 	if (v_actual < v_budget) {
3841 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3842 			 v_budget, v_actual);
3843 
3844 		if (v_actual < ICE_MIN_MSIX) {
3845 			/* error if we can't get minimum vectors */
3846 			pci_disable_msix(pf->pdev);
3847 			err = -ERANGE;
3848 			goto msix_err;
3849 		} else {
3850 			int v_remain = v_actual - v_other;
3851 			int v_rdma = 0, v_min_rdma = 0;
3852 
3853 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3854 				/* Need at least 1 interrupt in addition to
3855 				 * AEQ MSIX
3856 				 */
3857 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3858 				v_min_rdma = ICE_MIN_RDMA_MSIX;
3859 			}
3860 
3861 			if (v_actual == ICE_MIN_MSIX ||
3862 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3863 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3864 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3865 
3866 				pf->num_rdma_msix = 0;
3867 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3868 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3869 				   (v_remain - v_rdma < v_rdma)) {
3870 				/* Support minimum RDMA and give remaining
3871 				 * vectors to LAN MSIX
3872 				 */
3873 				pf->num_rdma_msix = v_min_rdma;
3874 				pf->num_lan_msix = v_remain - v_min_rdma;
3875 			} else {
3876 				/* Split remaining MSIX with RDMA after
3877 				 * accounting for AEQ MSIX
3878 				 */
3879 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3880 						    ICE_RDMA_NUM_AEQ_MSIX;
3881 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3882 			}
3883 
3884 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3885 				   pf->num_lan_msix);
3886 
3887 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3888 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3889 					   pf->num_rdma_msix);
3890 		}
3891 	}
3892 
3893 	return v_actual;
3894 
3895 msix_err:
3896 	devm_kfree(dev, pf->msix_entries);
3897 	goto exit_err;
3898 
3899 no_hw_vecs_left_err:
3900 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3901 		needed, v_left);
3902 	err = -ERANGE;
3903 exit_err:
3904 	pf->num_rdma_msix = 0;
3905 	pf->num_lan_msix = 0;
3906 	return err;
3907 }
3908 
3909 /**
3910  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3911  * @pf: board private structure
3912  */
3913 static void ice_dis_msix(struct ice_pf *pf)
3914 {
3915 	pci_disable_msix(pf->pdev);
3916 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3917 	pf->msix_entries = NULL;
3918 }
3919 
3920 /**
3921  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3922  * @pf: board private structure
3923  */
3924 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3925 {
3926 	ice_dis_msix(pf);
3927 
3928 	if (pf->irq_tracker) {
3929 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3930 		pf->irq_tracker = NULL;
3931 	}
3932 }
3933 
3934 /**
3935  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3936  * @pf: board private structure to initialize
3937  */
3938 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3939 {
3940 	int vectors;
3941 
3942 	vectors = ice_ena_msix_range(pf);
3943 
3944 	if (vectors < 0)
3945 		return vectors;
3946 
3947 	/* set up vector assignment tracking */
3948 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3949 				       struct_size(pf->irq_tracker, list, vectors),
3950 				       GFP_KERNEL);
3951 	if (!pf->irq_tracker) {
3952 		ice_dis_msix(pf);
3953 		return -ENOMEM;
3954 	}
3955 
3956 	/* populate SW interrupts pool with number of OS granted IRQs. */
3957 	pf->num_avail_sw_msix = (u16)vectors;
3958 	pf->irq_tracker->num_entries = (u16)vectors;
3959 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3960 
3961 	return 0;
3962 }
3963 
3964 /**
3965  * ice_is_wol_supported - check if WoL is supported
3966  * @hw: pointer to hardware info
3967  *
3968  * Check if WoL is supported based on the HW configuration.
3969  * Returns true if NVM supports and enables WoL for this port, false otherwise
3970  */
3971 bool ice_is_wol_supported(struct ice_hw *hw)
3972 {
3973 	u16 wol_ctrl;
3974 
3975 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3976 	 * word) indicates WoL is not supported on the corresponding PF ID.
3977 	 */
3978 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3979 		return false;
3980 
3981 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3982 }
3983 
3984 /**
3985  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3986  * @vsi: VSI being changed
3987  * @new_rx: new number of Rx queues
3988  * @new_tx: new number of Tx queues
3989  *
3990  * Only change the number of queues if new_tx, or new_rx is non-0.
3991  *
3992  * Returns 0 on success.
3993  */
3994 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3995 {
3996 	struct ice_pf *pf = vsi->back;
3997 	int err = 0, timeout = 50;
3998 
3999 	if (!new_rx && !new_tx)
4000 		return -EINVAL;
4001 
4002 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4003 		timeout--;
4004 		if (!timeout)
4005 			return -EBUSY;
4006 		usleep_range(1000, 2000);
4007 	}
4008 
4009 	if (new_tx)
4010 		vsi->req_txq = (u16)new_tx;
4011 	if (new_rx)
4012 		vsi->req_rxq = (u16)new_rx;
4013 
4014 	/* set for the next time the netdev is started */
4015 	if (!netif_running(vsi->netdev)) {
4016 		ice_vsi_rebuild(vsi, false);
4017 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4018 		goto done;
4019 	}
4020 
4021 	ice_vsi_close(vsi);
4022 	ice_vsi_rebuild(vsi, false);
4023 	ice_pf_dcb_recfg(pf);
4024 	ice_vsi_open(vsi);
4025 done:
4026 	clear_bit(ICE_CFG_BUSY, pf->state);
4027 	return err;
4028 }
4029 
4030 /**
4031  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4032  * @pf: PF to configure
4033  *
4034  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4035  * VSI can still Tx/Rx VLAN tagged packets.
4036  */
4037 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4038 {
4039 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4040 	struct ice_vsi_ctx *ctxt;
4041 	struct ice_hw *hw;
4042 	int status;
4043 
4044 	if (!vsi)
4045 		return;
4046 
4047 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4048 	if (!ctxt)
4049 		return;
4050 
4051 	hw = &pf->hw;
4052 	ctxt->info = vsi->info;
4053 
4054 	ctxt->info.valid_sections =
4055 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4056 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4057 			    ICE_AQ_VSI_PROP_SW_VALID);
4058 
4059 	/* disable VLAN anti-spoof */
4060 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4061 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4062 
4063 	/* disable VLAN pruning and keep all other settings */
4064 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4065 
4066 	/* allow all VLANs on Tx and don't strip on Rx */
4067 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
4068 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
4069 
4070 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4071 	if (status) {
4072 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4073 			status, ice_aq_str(hw->adminq.sq_last_status));
4074 	} else {
4075 		vsi->info.sec_flags = ctxt->info.sec_flags;
4076 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4077 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
4078 	}
4079 
4080 	kfree(ctxt);
4081 }
4082 
4083 /**
4084  * ice_log_pkg_init - log result of DDP package load
4085  * @hw: pointer to hardware info
4086  * @state: state of package load
4087  */
4088 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4089 {
4090 	struct ice_pf *pf = hw->back;
4091 	struct device *dev;
4092 
4093 	dev = ice_pf_to_dev(pf);
4094 
4095 	switch (state) {
4096 	case ICE_DDP_PKG_SUCCESS:
4097 		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4098 			 hw->active_pkg_name,
4099 			 hw->active_pkg_ver.major,
4100 			 hw->active_pkg_ver.minor,
4101 			 hw->active_pkg_ver.update,
4102 			 hw->active_pkg_ver.draft);
4103 		break;
4104 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4105 		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4106 			 hw->active_pkg_name,
4107 			 hw->active_pkg_ver.major,
4108 			 hw->active_pkg_ver.minor,
4109 			 hw->active_pkg_ver.update,
4110 			 hw->active_pkg_ver.draft);
4111 		break;
4112 	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4113 		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4114 			hw->active_pkg_name,
4115 			hw->active_pkg_ver.major,
4116 			hw->active_pkg_ver.minor,
4117 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4118 		break;
4119 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4120 		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4121 			 hw->active_pkg_name,
4122 			 hw->active_pkg_ver.major,
4123 			 hw->active_pkg_ver.minor,
4124 			 hw->active_pkg_ver.update,
4125 			 hw->active_pkg_ver.draft,
4126 			 hw->pkg_name,
4127 			 hw->pkg_ver.major,
4128 			 hw->pkg_ver.minor,
4129 			 hw->pkg_ver.update,
4130 			 hw->pkg_ver.draft);
4131 		break;
4132 	case ICE_DDP_PKG_FW_MISMATCH:
4133 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4134 		break;
4135 	case ICE_DDP_PKG_INVALID_FILE:
4136 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4137 		break;
4138 	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4139 		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4140 		break;
4141 	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4142 		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4143 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4144 		break;
4145 	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4146 		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4147 		break;
4148 	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4149 		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4150 		break;
4151 	case ICE_DDP_PKG_LOAD_ERROR:
4152 		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4153 		/* poll for reset to complete */
4154 		if (ice_check_reset(hw))
4155 			dev_err(dev, "Error resetting device. Please reload the driver\n");
4156 		break;
4157 	case ICE_DDP_PKG_ERR:
4158 	default:
4159 		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4160 		break;
4161 	}
4162 }
4163 
4164 /**
4165  * ice_load_pkg - load/reload the DDP Package file
4166  * @firmware: firmware structure when firmware requested or NULL for reload
4167  * @pf: pointer to the PF instance
4168  *
4169  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4170  * initialize HW tables.
4171  */
4172 static void
4173 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4174 {
4175 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4176 	struct device *dev = ice_pf_to_dev(pf);
4177 	struct ice_hw *hw = &pf->hw;
4178 
4179 	/* Load DDP Package */
4180 	if (firmware && !hw->pkg_copy) {
4181 		state = ice_copy_and_init_pkg(hw, firmware->data,
4182 					      firmware->size);
4183 		ice_log_pkg_init(hw, state);
4184 	} else if (!firmware && hw->pkg_copy) {
4185 		/* Reload package during rebuild after CORER/GLOBR reset */
4186 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4187 		ice_log_pkg_init(hw, state);
4188 	} else {
4189 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4190 	}
4191 
4192 	if (!ice_is_init_pkg_successful(state)) {
4193 		/* Safe Mode */
4194 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4195 		return;
4196 	}
4197 
4198 	/* Successful download package is the precondition for advanced
4199 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4200 	 */
4201 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4202 }
4203 
4204 /**
4205  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4206  * @pf: pointer to the PF structure
4207  *
4208  * There is no error returned here because the driver should be able to handle
4209  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4210  * specifically with Tx.
4211  */
4212 static void ice_verify_cacheline_size(struct ice_pf *pf)
4213 {
4214 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4215 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4216 			 ICE_CACHE_LINE_BYTES);
4217 }
4218 
4219 /**
4220  * ice_send_version - update firmware with driver version
4221  * @pf: PF struct
4222  *
4223  * Returns 0 on success, else error code
4224  */
4225 static int ice_send_version(struct ice_pf *pf)
4226 {
4227 	struct ice_driver_ver dv;
4228 
4229 	dv.major_ver = 0xff;
4230 	dv.minor_ver = 0xff;
4231 	dv.build_ver = 0xff;
4232 	dv.subbuild_ver = 0;
4233 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4234 		sizeof(dv.driver_string));
4235 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4236 }
4237 
4238 /**
4239  * ice_init_fdir - Initialize flow director VSI and configuration
4240  * @pf: pointer to the PF instance
4241  *
4242  * returns 0 on success, negative on error
4243  */
4244 static int ice_init_fdir(struct ice_pf *pf)
4245 {
4246 	struct device *dev = ice_pf_to_dev(pf);
4247 	struct ice_vsi *ctrl_vsi;
4248 	int err;
4249 
4250 	/* Side Band Flow Director needs to have a control VSI.
4251 	 * Allocate it and store it in the PF.
4252 	 */
4253 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4254 	if (!ctrl_vsi) {
4255 		dev_dbg(dev, "could not create control VSI\n");
4256 		return -ENOMEM;
4257 	}
4258 
4259 	err = ice_vsi_open_ctrl(ctrl_vsi);
4260 	if (err) {
4261 		dev_dbg(dev, "could not open control VSI\n");
4262 		goto err_vsi_open;
4263 	}
4264 
4265 	mutex_init(&pf->hw.fdir_fltr_lock);
4266 
4267 	err = ice_fdir_create_dflt_rules(pf);
4268 	if (err)
4269 		goto err_fdir_rule;
4270 
4271 	return 0;
4272 
4273 err_fdir_rule:
4274 	ice_fdir_release_flows(&pf->hw);
4275 	ice_vsi_close(ctrl_vsi);
4276 err_vsi_open:
4277 	ice_vsi_release(ctrl_vsi);
4278 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4279 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4280 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4281 	}
4282 	return err;
4283 }
4284 
4285 /**
4286  * ice_get_opt_fw_name - return optional firmware file name or NULL
4287  * @pf: pointer to the PF instance
4288  */
4289 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4290 {
4291 	/* Optional firmware name same as default with additional dash
4292 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
4293 	 */
4294 	struct pci_dev *pdev = pf->pdev;
4295 	char *opt_fw_filename;
4296 	u64 dsn;
4297 
4298 	/* Determine the name of the optional file using the DSN (two
4299 	 * dwords following the start of the DSN Capability).
4300 	 */
4301 	dsn = pci_get_dsn(pdev);
4302 	if (!dsn)
4303 		return NULL;
4304 
4305 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4306 	if (!opt_fw_filename)
4307 		return NULL;
4308 
4309 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4310 		 ICE_DDP_PKG_PATH, dsn);
4311 
4312 	return opt_fw_filename;
4313 }
4314 
4315 /**
4316  * ice_request_fw - Device initialization routine
4317  * @pf: pointer to the PF instance
4318  */
4319 static void ice_request_fw(struct ice_pf *pf)
4320 {
4321 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4322 	const struct firmware *firmware = NULL;
4323 	struct device *dev = ice_pf_to_dev(pf);
4324 	int err = 0;
4325 
4326 	/* optional device-specific DDP (if present) overrides the default DDP
4327 	 * package file. kernel logs a debug message if the file doesn't exist,
4328 	 * and warning messages for other errors.
4329 	 */
4330 	if (opt_fw_filename) {
4331 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4332 		if (err) {
4333 			kfree(opt_fw_filename);
4334 			goto dflt_pkg_load;
4335 		}
4336 
4337 		/* request for firmware was successful. Download to device */
4338 		ice_load_pkg(firmware, pf);
4339 		kfree(opt_fw_filename);
4340 		release_firmware(firmware);
4341 		return;
4342 	}
4343 
4344 dflt_pkg_load:
4345 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4346 	if (err) {
4347 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4348 		return;
4349 	}
4350 
4351 	/* request for firmware was successful. Download to device */
4352 	ice_load_pkg(firmware, pf);
4353 	release_firmware(firmware);
4354 }
4355 
4356 /**
4357  * ice_print_wake_reason - show the wake up cause in the log
4358  * @pf: pointer to the PF struct
4359  */
4360 static void ice_print_wake_reason(struct ice_pf *pf)
4361 {
4362 	u32 wus = pf->wakeup_reason;
4363 	const char *wake_str;
4364 
4365 	/* if no wake event, nothing to print */
4366 	if (!wus)
4367 		return;
4368 
4369 	if (wus & PFPM_WUS_LNKC_M)
4370 		wake_str = "Link\n";
4371 	else if (wus & PFPM_WUS_MAG_M)
4372 		wake_str = "Magic Packet\n";
4373 	else if (wus & PFPM_WUS_MNG_M)
4374 		wake_str = "Management\n";
4375 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4376 		wake_str = "Firmware Reset\n";
4377 	else
4378 		wake_str = "Unknown\n";
4379 
4380 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4381 }
4382 
4383 /**
4384  * ice_register_netdev - register netdev and devlink port
4385  * @pf: pointer to the PF struct
4386  */
4387 static int ice_register_netdev(struct ice_pf *pf)
4388 {
4389 	struct ice_vsi *vsi;
4390 	int err = 0;
4391 
4392 	vsi = ice_get_main_vsi(pf);
4393 	if (!vsi || !vsi->netdev)
4394 		return -EIO;
4395 
4396 	err = register_netdev(vsi->netdev);
4397 	if (err)
4398 		goto err_register_netdev;
4399 
4400 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4401 	netif_carrier_off(vsi->netdev);
4402 	netif_tx_stop_all_queues(vsi->netdev);
4403 	err = ice_devlink_create_pf_port(pf);
4404 	if (err)
4405 		goto err_devlink_create;
4406 
4407 	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
4408 
4409 	return 0;
4410 err_devlink_create:
4411 	unregister_netdev(vsi->netdev);
4412 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4413 err_register_netdev:
4414 	free_netdev(vsi->netdev);
4415 	vsi->netdev = NULL;
4416 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4417 	return err;
4418 }
4419 
4420 /**
4421  * ice_probe - Device initialization routine
4422  * @pdev: PCI device information struct
4423  * @ent: entry in ice_pci_tbl
4424  *
4425  * Returns 0 on success, negative on failure
4426  */
4427 static int
4428 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4429 {
4430 	struct device *dev = &pdev->dev;
4431 	struct ice_pf *pf;
4432 	struct ice_hw *hw;
4433 	int i, err;
4434 
4435 	if (pdev->is_virtfn) {
4436 		dev_err(dev, "can't probe a virtual function\n");
4437 		return -EINVAL;
4438 	}
4439 
4440 	/* this driver uses devres, see
4441 	 * Documentation/driver-api/driver-model/devres.rst
4442 	 */
4443 	err = pcim_enable_device(pdev);
4444 	if (err)
4445 		return err;
4446 
4447 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4448 	if (err) {
4449 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4450 		return err;
4451 	}
4452 
4453 	pf = ice_allocate_pf(dev);
4454 	if (!pf)
4455 		return -ENOMEM;
4456 
4457 	/* initialize Auxiliary index to invalid value */
4458 	pf->aux_idx = -1;
4459 
4460 	/* set up for high or low DMA */
4461 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4462 	if (err)
4463 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4464 	if (err) {
4465 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4466 		return err;
4467 	}
4468 
4469 	pci_enable_pcie_error_reporting(pdev);
4470 	pci_set_master(pdev);
4471 
4472 	pf->pdev = pdev;
4473 	pci_set_drvdata(pdev, pf);
4474 	set_bit(ICE_DOWN, pf->state);
4475 	/* Disable service task until DOWN bit is cleared */
4476 	set_bit(ICE_SERVICE_DIS, pf->state);
4477 
4478 	hw = &pf->hw;
4479 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4480 	pci_save_state(pdev);
4481 
4482 	hw->back = pf;
4483 	hw->vendor_id = pdev->vendor;
4484 	hw->device_id = pdev->device;
4485 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4486 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4487 	hw->subsystem_device_id = pdev->subsystem_device;
4488 	hw->bus.device = PCI_SLOT(pdev->devfn);
4489 	hw->bus.func = PCI_FUNC(pdev->devfn);
4490 	ice_set_ctrlq_len(hw);
4491 
4492 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4493 
4494 #ifndef CONFIG_DYNAMIC_DEBUG
4495 	if (debug < -1)
4496 		hw->debug_mask = debug;
4497 #endif
4498 
4499 	err = ice_init_hw(hw);
4500 	if (err) {
4501 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4502 		err = -EIO;
4503 		goto err_exit_unroll;
4504 	}
4505 
4506 	ice_init_feature_support(pf);
4507 
4508 	ice_request_fw(pf);
4509 
4510 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4511 	 * set in pf->state, which will cause ice_is_safe_mode to return
4512 	 * true
4513 	 */
4514 	if (ice_is_safe_mode(pf)) {
4515 		/* we already got function/device capabilities but these don't
4516 		 * reflect what the driver needs to do in safe mode. Instead of
4517 		 * adding conditional logic everywhere to ignore these
4518 		 * device/function capabilities, override them.
4519 		 */
4520 		ice_set_safe_mode_caps(hw);
4521 	}
4522 
4523 	err = ice_init_pf(pf);
4524 	if (err) {
4525 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4526 		goto err_init_pf_unroll;
4527 	}
4528 
4529 	ice_devlink_init_regions(pf);
4530 
4531 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4532 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4533 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4534 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4535 	i = 0;
4536 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4537 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4538 			pf->hw.tnl.valid_count[TNL_VXLAN];
4539 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4540 			UDP_TUNNEL_TYPE_VXLAN;
4541 		i++;
4542 	}
4543 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4544 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4545 			pf->hw.tnl.valid_count[TNL_GENEVE];
4546 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4547 			UDP_TUNNEL_TYPE_GENEVE;
4548 		i++;
4549 	}
4550 
4551 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4552 	if (!pf->num_alloc_vsi) {
4553 		err = -EIO;
4554 		goto err_init_pf_unroll;
4555 	}
4556 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4557 		dev_warn(&pf->pdev->dev,
4558 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4559 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4560 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4561 	}
4562 
4563 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4564 			       GFP_KERNEL);
4565 	if (!pf->vsi) {
4566 		err = -ENOMEM;
4567 		goto err_init_pf_unroll;
4568 	}
4569 
4570 	err = ice_init_interrupt_scheme(pf);
4571 	if (err) {
4572 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4573 		err = -EIO;
4574 		goto err_init_vsi_unroll;
4575 	}
4576 
4577 	/* In case of MSIX we are going to setup the misc vector right here
4578 	 * to handle admin queue events etc. In case of legacy and MSI
4579 	 * the misc functionality and queue processing is combined in
4580 	 * the same vector and that gets setup at open.
4581 	 */
4582 	err = ice_req_irq_msix_misc(pf);
4583 	if (err) {
4584 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4585 		goto err_init_interrupt_unroll;
4586 	}
4587 
4588 	/* create switch struct for the switch element created by FW on boot */
4589 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4590 	if (!pf->first_sw) {
4591 		err = -ENOMEM;
4592 		goto err_msix_misc_unroll;
4593 	}
4594 
4595 	if (hw->evb_veb)
4596 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4597 	else
4598 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4599 
4600 	pf->first_sw->pf = pf;
4601 
4602 	/* record the sw_id available for later use */
4603 	pf->first_sw->sw_id = hw->port_info->sw_id;
4604 
4605 	err = ice_setup_pf_sw(pf);
4606 	if (err) {
4607 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4608 		goto err_alloc_sw_unroll;
4609 	}
4610 
4611 	clear_bit(ICE_SERVICE_DIS, pf->state);
4612 
4613 	/* tell the firmware we are up */
4614 	err = ice_send_version(pf);
4615 	if (err) {
4616 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4617 			UTS_RELEASE, err);
4618 		goto err_send_version_unroll;
4619 	}
4620 
4621 	/* since everything is good, start the service timer */
4622 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4623 
4624 	err = ice_init_link_events(pf->hw.port_info);
4625 	if (err) {
4626 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4627 		goto err_send_version_unroll;
4628 	}
4629 
4630 	/* not a fatal error if this fails */
4631 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4632 	if (err)
4633 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4634 
4635 	/* not a fatal error if this fails */
4636 	err = ice_update_link_info(pf->hw.port_info);
4637 	if (err)
4638 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4639 
4640 	ice_init_link_dflt_override(pf->hw.port_info);
4641 
4642 	ice_check_link_cfg_err(pf,
4643 			       pf->hw.port_info->phy.link_info.link_cfg_err);
4644 
4645 	/* if media available, initialize PHY settings */
4646 	if (pf->hw.port_info->phy.link_info.link_info &
4647 	    ICE_AQ_MEDIA_AVAILABLE) {
4648 		/* not a fatal error if this fails */
4649 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4650 		if (err)
4651 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4652 
4653 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4654 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4655 
4656 			if (vsi)
4657 				ice_configure_phy(vsi);
4658 		}
4659 	} else {
4660 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4661 	}
4662 
4663 	ice_verify_cacheline_size(pf);
4664 
4665 	/* Save wakeup reason register for later use */
4666 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4667 
4668 	/* check for a power management event */
4669 	ice_print_wake_reason(pf);
4670 
4671 	/* clear wake status, all bits */
4672 	wr32(hw, PFPM_WUS, U32_MAX);
4673 
4674 	/* Disable WoL at init, wait for user to enable */
4675 	device_set_wakeup_enable(dev, false);
4676 
4677 	if (ice_is_safe_mode(pf)) {
4678 		ice_set_safe_mode_vlan_cfg(pf);
4679 		goto probe_done;
4680 	}
4681 
4682 	/* initialize DDP driven features */
4683 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4684 		ice_ptp_init(pf);
4685 
4686 	/* Note: Flow director init failure is non-fatal to load */
4687 	if (ice_init_fdir(pf))
4688 		dev_err(dev, "could not initialize flow director\n");
4689 
4690 	/* Note: DCB init failure is non-fatal to load */
4691 	if (ice_init_pf_dcb(pf, false)) {
4692 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4693 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4694 	} else {
4695 		ice_cfg_lldp_mib_change(&pf->hw, true);
4696 	}
4697 
4698 	if (ice_init_lag(pf))
4699 		dev_warn(dev, "Failed to init link aggregation support\n");
4700 
4701 	/* print PCI link speed and width */
4702 	pcie_print_link_status(pf->pdev);
4703 
4704 probe_done:
4705 	err = ice_register_netdev(pf);
4706 	if (err)
4707 		goto err_netdev_reg;
4708 
4709 	err = ice_devlink_register_params(pf);
4710 	if (err)
4711 		goto err_netdev_reg;
4712 
4713 	/* ready to go, so clear down state bit */
4714 	clear_bit(ICE_DOWN, pf->state);
4715 	if (ice_is_aux_ena(pf)) {
4716 		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4717 		if (pf->aux_idx < 0) {
4718 			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4719 			err = -ENOMEM;
4720 			goto err_devlink_reg_param;
4721 		}
4722 
4723 		err = ice_init_rdma(pf);
4724 		if (err) {
4725 			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4726 			err = -EIO;
4727 			goto err_init_aux_unroll;
4728 		}
4729 	} else {
4730 		dev_warn(dev, "RDMA is not supported on this device\n");
4731 	}
4732 
4733 	ice_devlink_register(pf);
4734 	return 0;
4735 
4736 err_init_aux_unroll:
4737 	pf->adev = NULL;
4738 	ida_free(&ice_aux_ida, pf->aux_idx);
4739 err_devlink_reg_param:
4740 	ice_devlink_unregister_params(pf);
4741 err_netdev_reg:
4742 err_send_version_unroll:
4743 	ice_vsi_release_all(pf);
4744 err_alloc_sw_unroll:
4745 	set_bit(ICE_SERVICE_DIS, pf->state);
4746 	set_bit(ICE_DOWN, pf->state);
4747 	devm_kfree(dev, pf->first_sw);
4748 err_msix_misc_unroll:
4749 	ice_free_irq_msix_misc(pf);
4750 err_init_interrupt_unroll:
4751 	ice_clear_interrupt_scheme(pf);
4752 err_init_vsi_unroll:
4753 	devm_kfree(dev, pf->vsi);
4754 err_init_pf_unroll:
4755 	ice_deinit_pf(pf);
4756 	ice_devlink_destroy_regions(pf);
4757 	ice_deinit_hw(hw);
4758 err_exit_unroll:
4759 	pci_disable_pcie_error_reporting(pdev);
4760 	pci_disable_device(pdev);
4761 	return err;
4762 }
4763 
4764 /**
4765  * ice_set_wake - enable or disable Wake on LAN
4766  * @pf: pointer to the PF struct
4767  *
4768  * Simple helper for WoL control
4769  */
4770 static void ice_set_wake(struct ice_pf *pf)
4771 {
4772 	struct ice_hw *hw = &pf->hw;
4773 	bool wol = pf->wol_ena;
4774 
4775 	/* clear wake state, otherwise new wake events won't fire */
4776 	wr32(hw, PFPM_WUS, U32_MAX);
4777 
4778 	/* enable / disable APM wake up, no RMW needed */
4779 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4780 
4781 	/* set magic packet filter enabled */
4782 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4783 }
4784 
4785 /**
4786  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4787  * @pf: pointer to the PF struct
4788  *
4789  * Issue firmware command to enable multicast magic wake, making
4790  * sure that any locally administered address (LAA) is used for
4791  * wake, and that PF reset doesn't undo the LAA.
4792  */
4793 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4794 {
4795 	struct device *dev = ice_pf_to_dev(pf);
4796 	struct ice_hw *hw = &pf->hw;
4797 	u8 mac_addr[ETH_ALEN];
4798 	struct ice_vsi *vsi;
4799 	int status;
4800 	u8 flags;
4801 
4802 	if (!pf->wol_ena)
4803 		return;
4804 
4805 	vsi = ice_get_main_vsi(pf);
4806 	if (!vsi)
4807 		return;
4808 
4809 	/* Get current MAC address in case it's an LAA */
4810 	if (vsi->netdev)
4811 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4812 	else
4813 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4814 
4815 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4816 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4817 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4818 
4819 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4820 	if (status)
4821 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
4822 			status, ice_aq_str(hw->adminq.sq_last_status));
4823 }
4824 
4825 /**
4826  * ice_remove - Device removal routine
4827  * @pdev: PCI device information struct
4828  */
4829 static void ice_remove(struct pci_dev *pdev)
4830 {
4831 	struct ice_pf *pf = pci_get_drvdata(pdev);
4832 	int i;
4833 
4834 	ice_devlink_unregister(pf);
4835 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4836 		if (!ice_is_reset_in_progress(pf->state))
4837 			break;
4838 		msleep(100);
4839 	}
4840 
4841 	ice_tc_indir_block_remove(pf);
4842 
4843 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4844 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4845 		ice_free_vfs(pf);
4846 	}
4847 
4848 	ice_service_task_stop(pf);
4849 
4850 	ice_aq_cancel_waiting_tasks(pf);
4851 	ice_unplug_aux_dev(pf);
4852 	if (pf->aux_idx >= 0)
4853 		ida_free(&ice_aux_ida, pf->aux_idx);
4854 	ice_devlink_unregister_params(pf);
4855 	set_bit(ICE_DOWN, pf->state);
4856 
4857 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
4858 	ice_deinit_lag(pf);
4859 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4860 		ice_ptp_release(pf);
4861 	if (!ice_is_safe_mode(pf))
4862 		ice_remove_arfs(pf);
4863 	ice_setup_mc_magic_wake(pf);
4864 	ice_vsi_release_all(pf);
4865 	ice_set_wake(pf);
4866 	ice_free_irq_msix_misc(pf);
4867 	ice_for_each_vsi(pf, i) {
4868 		if (!pf->vsi[i])
4869 			continue;
4870 		ice_vsi_free_q_vectors(pf->vsi[i]);
4871 	}
4872 	ice_deinit_pf(pf);
4873 	ice_devlink_destroy_regions(pf);
4874 	ice_deinit_hw(&pf->hw);
4875 
4876 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4877 	 * do it via ice_schedule_reset() since there is no need to rebuild
4878 	 * and the service task is already stopped.
4879 	 */
4880 	ice_reset(&pf->hw, ICE_RESET_PFR);
4881 	pci_wait_for_pending_transaction(pdev);
4882 	ice_clear_interrupt_scheme(pf);
4883 	pci_disable_pcie_error_reporting(pdev);
4884 	pci_disable_device(pdev);
4885 }
4886 
4887 /**
4888  * ice_shutdown - PCI callback for shutting down device
4889  * @pdev: PCI device information struct
4890  */
4891 static void ice_shutdown(struct pci_dev *pdev)
4892 {
4893 	struct ice_pf *pf = pci_get_drvdata(pdev);
4894 
4895 	ice_remove(pdev);
4896 
4897 	if (system_state == SYSTEM_POWER_OFF) {
4898 		pci_wake_from_d3(pdev, pf->wol_ena);
4899 		pci_set_power_state(pdev, PCI_D3hot);
4900 	}
4901 }
4902 
4903 #ifdef CONFIG_PM
4904 /**
4905  * ice_prepare_for_shutdown - prep for PCI shutdown
4906  * @pf: board private structure
4907  *
4908  * Inform or close all dependent features in prep for PCI device shutdown
4909  */
4910 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4911 {
4912 	struct ice_hw *hw = &pf->hw;
4913 	u32 v;
4914 
4915 	/* Notify VFs of impending reset */
4916 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4917 		ice_vc_notify_reset(pf);
4918 
4919 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4920 
4921 	/* disable the VSIs and their queues that are not already DOWN */
4922 	ice_pf_dis_all_vsi(pf, false);
4923 
4924 	ice_for_each_vsi(pf, v)
4925 		if (pf->vsi[v])
4926 			pf->vsi[v]->vsi_num = 0;
4927 
4928 	ice_shutdown_all_ctrlq(hw);
4929 }
4930 
4931 /**
4932  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4933  * @pf: board private structure to reinitialize
4934  *
4935  * This routine reinitialize interrupt scheme that was cleared during
4936  * power management suspend callback.
4937  *
4938  * This should be called during resume routine to re-allocate the q_vectors
4939  * and reacquire interrupts.
4940  */
4941 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4942 {
4943 	struct device *dev = ice_pf_to_dev(pf);
4944 	int ret, v;
4945 
4946 	/* Since we clear MSIX flag during suspend, we need to
4947 	 * set it back during resume...
4948 	 */
4949 
4950 	ret = ice_init_interrupt_scheme(pf);
4951 	if (ret) {
4952 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4953 		return ret;
4954 	}
4955 
4956 	/* Remap vectors and rings, after successful re-init interrupts */
4957 	ice_for_each_vsi(pf, v) {
4958 		if (!pf->vsi[v])
4959 			continue;
4960 
4961 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4962 		if (ret)
4963 			goto err_reinit;
4964 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4965 	}
4966 
4967 	ret = ice_req_irq_msix_misc(pf);
4968 	if (ret) {
4969 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4970 			ret);
4971 		goto err_reinit;
4972 	}
4973 
4974 	return 0;
4975 
4976 err_reinit:
4977 	while (v--)
4978 		if (pf->vsi[v])
4979 			ice_vsi_free_q_vectors(pf->vsi[v]);
4980 
4981 	return ret;
4982 }
4983 
4984 /**
4985  * ice_suspend
4986  * @dev: generic device information structure
4987  *
4988  * Power Management callback to quiesce the device and prepare
4989  * for D3 transition.
4990  */
4991 static int __maybe_unused ice_suspend(struct device *dev)
4992 {
4993 	struct pci_dev *pdev = to_pci_dev(dev);
4994 	struct ice_pf *pf;
4995 	int disabled, v;
4996 
4997 	pf = pci_get_drvdata(pdev);
4998 
4999 	if (!ice_pf_state_is_nominal(pf)) {
5000 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5001 		return -EBUSY;
5002 	}
5003 
5004 	/* Stop watchdog tasks until resume completion.
5005 	 * Even though it is most likely that the service task is
5006 	 * disabled if the device is suspended or down, the service task's
5007 	 * state is controlled by a different state bit, and we should
5008 	 * store and honor whatever state that bit is in at this point.
5009 	 */
5010 	disabled = ice_service_task_stop(pf);
5011 
5012 	ice_unplug_aux_dev(pf);
5013 
5014 	/* Already suspended?, then there is nothing to do */
5015 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5016 		if (!disabled)
5017 			ice_service_task_restart(pf);
5018 		return 0;
5019 	}
5020 
5021 	if (test_bit(ICE_DOWN, pf->state) ||
5022 	    ice_is_reset_in_progress(pf->state)) {
5023 		dev_err(dev, "can't suspend device in reset or already down\n");
5024 		if (!disabled)
5025 			ice_service_task_restart(pf);
5026 		return 0;
5027 	}
5028 
5029 	ice_setup_mc_magic_wake(pf);
5030 
5031 	ice_prepare_for_shutdown(pf);
5032 
5033 	ice_set_wake(pf);
5034 
5035 	/* Free vectors, clear the interrupt scheme and release IRQs
5036 	 * for proper hibernation, especially with large number of CPUs.
5037 	 * Otherwise hibernation might fail when mapping all the vectors back
5038 	 * to CPU0.
5039 	 */
5040 	ice_free_irq_msix_misc(pf);
5041 	ice_for_each_vsi(pf, v) {
5042 		if (!pf->vsi[v])
5043 			continue;
5044 		ice_vsi_free_q_vectors(pf->vsi[v]);
5045 	}
5046 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
5047 	ice_clear_interrupt_scheme(pf);
5048 
5049 	pci_save_state(pdev);
5050 	pci_wake_from_d3(pdev, pf->wol_ena);
5051 	pci_set_power_state(pdev, PCI_D3hot);
5052 	return 0;
5053 }
5054 
5055 /**
5056  * ice_resume - PM callback for waking up from D3
5057  * @dev: generic device information structure
5058  */
5059 static int __maybe_unused ice_resume(struct device *dev)
5060 {
5061 	struct pci_dev *pdev = to_pci_dev(dev);
5062 	enum ice_reset_req reset_type;
5063 	struct ice_pf *pf;
5064 	struct ice_hw *hw;
5065 	int ret;
5066 
5067 	pci_set_power_state(pdev, PCI_D0);
5068 	pci_restore_state(pdev);
5069 	pci_save_state(pdev);
5070 
5071 	if (!pci_device_is_present(pdev))
5072 		return -ENODEV;
5073 
5074 	ret = pci_enable_device_mem(pdev);
5075 	if (ret) {
5076 		dev_err(dev, "Cannot enable device after suspend\n");
5077 		return ret;
5078 	}
5079 
5080 	pf = pci_get_drvdata(pdev);
5081 	hw = &pf->hw;
5082 
5083 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5084 	ice_print_wake_reason(pf);
5085 
5086 	/* We cleared the interrupt scheme when we suspended, so we need to
5087 	 * restore it now to resume device functionality.
5088 	 */
5089 	ret = ice_reinit_interrupt_scheme(pf);
5090 	if (ret)
5091 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5092 
5093 	clear_bit(ICE_DOWN, pf->state);
5094 	/* Now perform PF reset and rebuild */
5095 	reset_type = ICE_RESET_PFR;
5096 	/* re-enable service task for reset, but allow reset to schedule it */
5097 	clear_bit(ICE_SERVICE_DIS, pf->state);
5098 
5099 	if (ice_schedule_reset(pf, reset_type))
5100 		dev_err(dev, "Reset during resume failed.\n");
5101 
5102 	clear_bit(ICE_SUSPENDED, pf->state);
5103 	ice_service_task_restart(pf);
5104 
5105 	/* Restart the service task */
5106 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5107 
5108 	return 0;
5109 }
5110 #endif /* CONFIG_PM */
5111 
5112 /**
5113  * ice_pci_err_detected - warning that PCI error has been detected
5114  * @pdev: PCI device information struct
5115  * @err: the type of PCI error
5116  *
5117  * Called to warn that something happened on the PCI bus and the error handling
5118  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5119  */
5120 static pci_ers_result_t
5121 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5122 {
5123 	struct ice_pf *pf = pci_get_drvdata(pdev);
5124 
5125 	if (!pf) {
5126 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5127 			__func__, err);
5128 		return PCI_ERS_RESULT_DISCONNECT;
5129 	}
5130 
5131 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5132 		ice_service_task_stop(pf);
5133 
5134 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5135 			set_bit(ICE_PFR_REQ, pf->state);
5136 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5137 		}
5138 	}
5139 
5140 	return PCI_ERS_RESULT_NEED_RESET;
5141 }
5142 
5143 /**
5144  * ice_pci_err_slot_reset - a PCI slot reset has just happened
5145  * @pdev: PCI device information struct
5146  *
5147  * Called to determine if the driver can recover from the PCI slot reset by
5148  * using a register read to determine if the device is recoverable.
5149  */
5150 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5151 {
5152 	struct ice_pf *pf = pci_get_drvdata(pdev);
5153 	pci_ers_result_t result;
5154 	int err;
5155 	u32 reg;
5156 
5157 	err = pci_enable_device_mem(pdev);
5158 	if (err) {
5159 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5160 			err);
5161 		result = PCI_ERS_RESULT_DISCONNECT;
5162 	} else {
5163 		pci_set_master(pdev);
5164 		pci_restore_state(pdev);
5165 		pci_save_state(pdev);
5166 		pci_wake_from_d3(pdev, false);
5167 
5168 		/* Check for life */
5169 		reg = rd32(&pf->hw, GLGEN_RTRIG);
5170 		if (!reg)
5171 			result = PCI_ERS_RESULT_RECOVERED;
5172 		else
5173 			result = PCI_ERS_RESULT_DISCONNECT;
5174 	}
5175 
5176 	err = pci_aer_clear_nonfatal_status(pdev);
5177 	if (err)
5178 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
5179 			err);
5180 		/* non-fatal, continue */
5181 
5182 	return result;
5183 }
5184 
5185 /**
5186  * ice_pci_err_resume - restart operations after PCI error recovery
5187  * @pdev: PCI device information struct
5188  *
5189  * Called to allow the driver to bring things back up after PCI error and/or
5190  * reset recovery have finished
5191  */
5192 static void ice_pci_err_resume(struct pci_dev *pdev)
5193 {
5194 	struct ice_pf *pf = pci_get_drvdata(pdev);
5195 
5196 	if (!pf) {
5197 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5198 			__func__);
5199 		return;
5200 	}
5201 
5202 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5203 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5204 			__func__);
5205 		return;
5206 	}
5207 
5208 	ice_restore_all_vfs_msi_state(pdev);
5209 
5210 	ice_do_reset(pf, ICE_RESET_PFR);
5211 	ice_service_task_restart(pf);
5212 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5213 }
5214 
5215 /**
5216  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5217  * @pdev: PCI device information struct
5218  */
5219 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5220 {
5221 	struct ice_pf *pf = pci_get_drvdata(pdev);
5222 
5223 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5224 		ice_service_task_stop(pf);
5225 
5226 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5227 			set_bit(ICE_PFR_REQ, pf->state);
5228 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5229 		}
5230 	}
5231 }
5232 
5233 /**
5234  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5235  * @pdev: PCI device information struct
5236  */
5237 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5238 {
5239 	ice_pci_err_resume(pdev);
5240 }
5241 
5242 /* ice_pci_tbl - PCI Device ID Table
5243  *
5244  * Wildcard entries (PCI_ANY_ID) should come last
5245  * Last entry must be all 0s
5246  *
5247  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5248  *   Class, Class Mask, private data (not used) }
5249  */
5250 static const struct pci_device_id ice_pci_tbl[] = {
5251 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5252 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5253 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5254 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5255 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5256 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5257 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5258 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5259 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5260 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5261 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5262 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5263 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5264 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5265 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5266 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5267 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5268 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5269 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5270 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5271 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5272 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5273 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5274 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5275 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5276 	/* required last entry */
5277 	{ 0, }
5278 };
5279 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5280 
5281 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5282 
5283 static const struct pci_error_handlers ice_pci_err_handler = {
5284 	.error_detected = ice_pci_err_detected,
5285 	.slot_reset = ice_pci_err_slot_reset,
5286 	.reset_prepare = ice_pci_err_reset_prepare,
5287 	.reset_done = ice_pci_err_reset_done,
5288 	.resume = ice_pci_err_resume
5289 };
5290 
5291 static struct pci_driver ice_driver = {
5292 	.name = KBUILD_MODNAME,
5293 	.id_table = ice_pci_tbl,
5294 	.probe = ice_probe,
5295 	.remove = ice_remove,
5296 #ifdef CONFIG_PM
5297 	.driver.pm = &ice_pm_ops,
5298 #endif /* CONFIG_PM */
5299 	.shutdown = ice_shutdown,
5300 	.sriov_configure = ice_sriov_configure,
5301 	.err_handler = &ice_pci_err_handler
5302 };
5303 
5304 /**
5305  * ice_module_init - Driver registration routine
5306  *
5307  * ice_module_init is the first routine called when the driver is
5308  * loaded. All it does is register with the PCI subsystem.
5309  */
5310 static int __init ice_module_init(void)
5311 {
5312 	int status;
5313 
5314 	pr_info("%s\n", ice_driver_string);
5315 	pr_info("%s\n", ice_copyright);
5316 
5317 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5318 	if (!ice_wq) {
5319 		pr_err("Failed to create workqueue\n");
5320 		return -ENOMEM;
5321 	}
5322 
5323 	status = pci_register_driver(&ice_driver);
5324 	if (status) {
5325 		pr_err("failed to register PCI driver, err %d\n", status);
5326 		destroy_workqueue(ice_wq);
5327 	}
5328 
5329 	return status;
5330 }
5331 module_init(ice_module_init);
5332 
5333 /**
5334  * ice_module_exit - Driver exit cleanup routine
5335  *
5336  * ice_module_exit is called just before the driver is removed
5337  * from memory.
5338  */
5339 static void __exit ice_module_exit(void)
5340 {
5341 	pci_unregister_driver(&ice_driver);
5342 	destroy_workqueue(ice_wq);
5343 	pr_info("module unloaded\n");
5344 }
5345 module_exit(ice_module_exit);
5346 
5347 /**
5348  * ice_set_mac_address - NDO callback to set MAC address
5349  * @netdev: network interface device structure
5350  * @pi: pointer to an address structure
5351  *
5352  * Returns 0 on success, negative on failure
5353  */
5354 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5355 {
5356 	struct ice_netdev_priv *np = netdev_priv(netdev);
5357 	struct ice_vsi *vsi = np->vsi;
5358 	struct ice_pf *pf = vsi->back;
5359 	struct ice_hw *hw = &pf->hw;
5360 	struct sockaddr *addr = pi;
5361 	u8 old_mac[ETH_ALEN];
5362 	u8 flags = 0;
5363 	u8 *mac;
5364 	int err;
5365 
5366 	mac = (u8 *)addr->sa_data;
5367 
5368 	if (!is_valid_ether_addr(mac))
5369 		return -EADDRNOTAVAIL;
5370 
5371 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5372 		netdev_dbg(netdev, "already using mac %pM\n", mac);
5373 		return 0;
5374 	}
5375 
5376 	if (test_bit(ICE_DOWN, pf->state) ||
5377 	    ice_is_reset_in_progress(pf->state)) {
5378 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5379 			   mac);
5380 		return -EBUSY;
5381 	}
5382 
5383 	if (ice_chnl_dmac_fltr_cnt(pf)) {
5384 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5385 			   mac);
5386 		return -EAGAIN;
5387 	}
5388 
5389 	netif_addr_lock_bh(netdev);
5390 	ether_addr_copy(old_mac, netdev->dev_addr);
5391 	/* change the netdev's MAC address */
5392 	eth_hw_addr_set(netdev, mac);
5393 	netif_addr_unlock_bh(netdev);
5394 
5395 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5396 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5397 	if (err && err != -ENOENT) {
5398 		err = -EADDRNOTAVAIL;
5399 		goto err_update_filters;
5400 	}
5401 
5402 	/* Add filter for new MAC. If filter exists, return success */
5403 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5404 	if (err == -EEXIST)
5405 		/* Although this MAC filter is already present in hardware it's
5406 		 * possible in some cases (e.g. bonding) that dev_addr was
5407 		 * modified outside of the driver and needs to be restored back
5408 		 * to this value.
5409 		 */
5410 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5411 	else if (err)
5412 		/* error if the new filter addition failed */
5413 		err = -EADDRNOTAVAIL;
5414 
5415 err_update_filters:
5416 	if (err) {
5417 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5418 			   mac);
5419 		netif_addr_lock_bh(netdev);
5420 		eth_hw_addr_set(netdev, old_mac);
5421 		netif_addr_unlock_bh(netdev);
5422 		return err;
5423 	}
5424 
5425 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5426 		   netdev->dev_addr);
5427 
5428 	/* write new MAC address to the firmware */
5429 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5430 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5431 	if (err) {
5432 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5433 			   mac, err);
5434 	}
5435 	return 0;
5436 }
5437 
5438 /**
5439  * ice_set_rx_mode - NDO callback to set the netdev filters
5440  * @netdev: network interface device structure
5441  */
5442 static void ice_set_rx_mode(struct net_device *netdev)
5443 {
5444 	struct ice_netdev_priv *np = netdev_priv(netdev);
5445 	struct ice_vsi *vsi = np->vsi;
5446 
5447 	if (!vsi)
5448 		return;
5449 
5450 	/* Set the flags to synchronize filters
5451 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5452 	 * flags
5453 	 */
5454 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5455 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5456 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5457 
5458 	/* schedule our worker thread which will take care of
5459 	 * applying the new filter changes
5460 	 */
5461 	ice_service_task_schedule(vsi->back);
5462 }
5463 
5464 /**
5465  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5466  * @netdev: network interface device structure
5467  * @queue_index: Queue ID
5468  * @maxrate: maximum bandwidth in Mbps
5469  */
5470 static int
5471 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5472 {
5473 	struct ice_netdev_priv *np = netdev_priv(netdev);
5474 	struct ice_vsi *vsi = np->vsi;
5475 	u16 q_handle;
5476 	int status;
5477 	u8 tc;
5478 
5479 	/* Validate maxrate requested is within permitted range */
5480 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5481 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5482 			   maxrate, queue_index);
5483 		return -EINVAL;
5484 	}
5485 
5486 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5487 	tc = ice_dcb_get_tc(vsi, queue_index);
5488 
5489 	/* Set BW back to default, when user set maxrate to 0 */
5490 	if (!maxrate)
5491 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5492 					       q_handle, ICE_MAX_BW);
5493 	else
5494 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5495 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5496 	if (status)
5497 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5498 			   status);
5499 
5500 	return status;
5501 }
5502 
5503 /**
5504  * ice_fdb_add - add an entry to the hardware database
5505  * @ndm: the input from the stack
5506  * @tb: pointer to array of nladdr (unused)
5507  * @dev: the net device pointer
5508  * @addr: the MAC address entry being added
5509  * @vid: VLAN ID
5510  * @flags: instructions from stack about fdb operation
5511  * @extack: netlink extended ack
5512  */
5513 static int
5514 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5515 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5516 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5517 {
5518 	int err;
5519 
5520 	if (vid) {
5521 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5522 		return -EINVAL;
5523 	}
5524 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5525 		netdev_err(dev, "FDB only supports static addresses\n");
5526 		return -EINVAL;
5527 	}
5528 
5529 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5530 		err = dev_uc_add_excl(dev, addr);
5531 	else if (is_multicast_ether_addr(addr))
5532 		err = dev_mc_add_excl(dev, addr);
5533 	else
5534 		err = -EINVAL;
5535 
5536 	/* Only return duplicate errors if NLM_F_EXCL is set */
5537 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5538 		err = 0;
5539 
5540 	return err;
5541 }
5542 
5543 /**
5544  * ice_fdb_del - delete an entry from the hardware database
5545  * @ndm: the input from the stack
5546  * @tb: pointer to array of nladdr (unused)
5547  * @dev: the net device pointer
5548  * @addr: the MAC address entry being added
5549  * @vid: VLAN ID
5550  */
5551 static int
5552 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5553 	    struct net_device *dev, const unsigned char *addr,
5554 	    __always_unused u16 vid)
5555 {
5556 	int err;
5557 
5558 	if (ndm->ndm_state & NUD_PERMANENT) {
5559 		netdev_err(dev, "FDB only supports static addresses\n");
5560 		return -EINVAL;
5561 	}
5562 
5563 	if (is_unicast_ether_addr(addr))
5564 		err = dev_uc_del(dev, addr);
5565 	else if (is_multicast_ether_addr(addr))
5566 		err = dev_mc_del(dev, addr);
5567 	else
5568 		err = -EINVAL;
5569 
5570 	return err;
5571 }
5572 
5573 /**
5574  * ice_set_features - set the netdev feature flags
5575  * @netdev: ptr to the netdev being adjusted
5576  * @features: the feature set that the stack is suggesting
5577  */
5578 static int
5579 ice_set_features(struct net_device *netdev, netdev_features_t features)
5580 {
5581 	struct ice_netdev_priv *np = netdev_priv(netdev);
5582 	struct ice_vsi *vsi = np->vsi;
5583 	struct ice_pf *pf = vsi->back;
5584 	int ret = 0;
5585 
5586 	/* Don't set any netdev advanced features with device in Safe Mode */
5587 	if (ice_is_safe_mode(vsi->back)) {
5588 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5589 		return ret;
5590 	}
5591 
5592 	/* Do not change setting during reset */
5593 	if (ice_is_reset_in_progress(pf->state)) {
5594 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5595 		return -EBUSY;
5596 	}
5597 
5598 	/* Multiple features can be changed in one call so keep features in
5599 	 * separate if/else statements to guarantee each feature is checked
5600 	 */
5601 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5602 		ice_vsi_manage_rss_lut(vsi, true);
5603 	else if (!(features & NETIF_F_RXHASH) &&
5604 		 netdev->features & NETIF_F_RXHASH)
5605 		ice_vsi_manage_rss_lut(vsi, false);
5606 
5607 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5608 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5609 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5610 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5611 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5612 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5613 
5614 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5615 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5616 		ret = ice_vsi_manage_vlan_insertion(vsi);
5617 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5618 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5619 		ret = ice_vsi_manage_vlan_insertion(vsi);
5620 
5621 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5622 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5623 		ret = ice_cfg_vlan_pruning(vsi, true);
5624 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5625 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5626 		ret = ice_cfg_vlan_pruning(vsi, false);
5627 
5628 	if ((features & NETIF_F_NTUPLE) &&
5629 	    !(netdev->features & NETIF_F_NTUPLE)) {
5630 		ice_vsi_manage_fdir(vsi, true);
5631 		ice_init_arfs(vsi);
5632 	} else if (!(features & NETIF_F_NTUPLE) &&
5633 		 (netdev->features & NETIF_F_NTUPLE)) {
5634 		ice_vsi_manage_fdir(vsi, false);
5635 		ice_clear_arfs(vsi);
5636 	}
5637 
5638 	/* don't turn off hw_tc_offload when ADQ is already enabled */
5639 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
5640 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
5641 		return -EACCES;
5642 	}
5643 
5644 	if ((features & NETIF_F_HW_TC) &&
5645 	    !(netdev->features & NETIF_F_HW_TC))
5646 		set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
5647 	else
5648 		clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
5649 
5650 	return ret;
5651 }
5652 
5653 /**
5654  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5655  * @vsi: VSI to setup VLAN properties for
5656  */
5657 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5658 {
5659 	int ret = 0;
5660 
5661 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5662 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5663 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5664 		ret = ice_vsi_manage_vlan_insertion(vsi);
5665 
5666 	return ret;
5667 }
5668 
5669 /**
5670  * ice_vsi_cfg - Setup the VSI
5671  * @vsi: the VSI being configured
5672  *
5673  * Return 0 on success and negative value on error
5674  */
5675 int ice_vsi_cfg(struct ice_vsi *vsi)
5676 {
5677 	int err;
5678 
5679 	if (vsi->netdev) {
5680 		ice_set_rx_mode(vsi->netdev);
5681 
5682 		err = ice_vsi_vlan_setup(vsi);
5683 
5684 		if (err)
5685 			return err;
5686 	}
5687 	ice_vsi_cfg_dcb_rings(vsi);
5688 
5689 	err = ice_vsi_cfg_lan_txqs(vsi);
5690 	if (!err && ice_is_xdp_ena_vsi(vsi))
5691 		err = ice_vsi_cfg_xdp_txqs(vsi);
5692 	if (!err)
5693 		err = ice_vsi_cfg_rxqs(vsi);
5694 
5695 	return err;
5696 }
5697 
5698 /* THEORY OF MODERATION:
5699  * The ice driver hardware works differently than the hardware that DIMLIB was
5700  * originally made for. ice hardware doesn't have packet count limits that
5701  * can trigger an interrupt, but it *does* have interrupt rate limit support,
5702  * which is hard-coded to a limit of 250,000 ints/second.
5703  * If not using dynamic moderation, the INTRL value can be modified
5704  * by ethtool rx-usecs-high.
5705  */
5706 struct ice_dim {
5707 	/* the throttle rate for interrupts, basically worst case delay before
5708 	 * an initial interrupt fires, value is stored in microseconds.
5709 	 */
5710 	u16 itr;
5711 };
5712 
5713 /* Make a different profile for Rx that doesn't allow quite so aggressive
5714  * moderation at the high end (it maxes out at 126us or about 8k interrupts a
5715  * second.
5716  */
5717 static const struct ice_dim rx_profile[] = {
5718 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
5719 	{8},    /* 125,000 ints/s */
5720 	{16},   /*  62,500 ints/s */
5721 	{62},   /*  16,129 ints/s */
5722 	{126}   /*   7,936 ints/s */
5723 };
5724 
5725 /* The transmit profile, which has the same sorts of values
5726  * as the previous struct
5727  */
5728 static const struct ice_dim tx_profile[] = {
5729 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
5730 	{8},    /* 125,000 ints/s */
5731 	{40},   /*  16,125 ints/s */
5732 	{128},  /*   7,812 ints/s */
5733 	{256}   /*   3,906 ints/s */
5734 };
5735 
5736 static void ice_tx_dim_work(struct work_struct *work)
5737 {
5738 	struct ice_ring_container *rc;
5739 	struct dim *dim;
5740 	u16 itr;
5741 
5742 	dim = container_of(work, struct dim, work);
5743 	rc = (struct ice_ring_container *)dim->priv;
5744 
5745 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
5746 
5747 	/* look up the values in our local table */
5748 	itr = tx_profile[dim->profile_ix].itr;
5749 
5750 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
5751 	ice_write_itr(rc, itr);
5752 
5753 	dim->state = DIM_START_MEASURE;
5754 }
5755 
5756 static void ice_rx_dim_work(struct work_struct *work)
5757 {
5758 	struct ice_ring_container *rc;
5759 	struct dim *dim;
5760 	u16 itr;
5761 
5762 	dim = container_of(work, struct dim, work);
5763 	rc = (struct ice_ring_container *)dim->priv;
5764 
5765 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
5766 
5767 	/* look up the values in our local table */
5768 	itr = rx_profile[dim->profile_ix].itr;
5769 
5770 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
5771 	ice_write_itr(rc, itr);
5772 
5773 	dim->state = DIM_START_MEASURE;
5774 }
5775 
5776 #define ICE_DIM_DEFAULT_PROFILE_IX 1
5777 
5778 /**
5779  * ice_init_moderation - set up interrupt moderation
5780  * @q_vector: the vector containing rings to be configured
5781  *
5782  * Set up interrupt moderation registers, with the intent to do the right thing
5783  * when called from reset or from probe, and whether or not dynamic moderation
5784  * is enabled or not. Take special care to write all the registers in both
5785  * dynamic moderation mode or not in order to make sure hardware is in a known
5786  * state.
5787  */
5788 static void ice_init_moderation(struct ice_q_vector *q_vector)
5789 {
5790 	struct ice_ring_container *rc;
5791 	bool tx_dynamic, rx_dynamic;
5792 
5793 	rc = &q_vector->tx;
5794 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
5795 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5796 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
5797 	rc->dim.priv = rc;
5798 	tx_dynamic = ITR_IS_DYNAMIC(rc);
5799 
5800 	/* set the initial TX ITR to match the above */
5801 	ice_write_itr(rc, tx_dynamic ?
5802 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
5803 
5804 	rc = &q_vector->rx;
5805 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
5806 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5807 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
5808 	rc->dim.priv = rc;
5809 	rx_dynamic = ITR_IS_DYNAMIC(rc);
5810 
5811 	/* set the initial RX ITR to match the above */
5812 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
5813 				       rc->itr_setting);
5814 
5815 	ice_set_q_vector_intrl(q_vector);
5816 }
5817 
5818 /**
5819  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5820  * @vsi: the VSI being configured
5821  */
5822 static void ice_napi_enable_all(struct ice_vsi *vsi)
5823 {
5824 	int q_idx;
5825 
5826 	if (!vsi->netdev)
5827 		return;
5828 
5829 	ice_for_each_q_vector(vsi, q_idx) {
5830 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5831 
5832 		ice_init_moderation(q_vector);
5833 
5834 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
5835 			napi_enable(&q_vector->napi);
5836 	}
5837 }
5838 
5839 /**
5840  * ice_up_complete - Finish the last steps of bringing up a connection
5841  * @vsi: The VSI being configured
5842  *
5843  * Return 0 on success and negative value on error
5844  */
5845 static int ice_up_complete(struct ice_vsi *vsi)
5846 {
5847 	struct ice_pf *pf = vsi->back;
5848 	int err;
5849 
5850 	ice_vsi_cfg_msix(vsi);
5851 
5852 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5853 	 * Tx queue group list was configured and the context bits were
5854 	 * programmed using ice_vsi_cfg_txqs
5855 	 */
5856 	err = ice_vsi_start_all_rx_rings(vsi);
5857 	if (err)
5858 		return err;
5859 
5860 	clear_bit(ICE_VSI_DOWN, vsi->state);
5861 	ice_napi_enable_all(vsi);
5862 	ice_vsi_ena_irq(vsi);
5863 
5864 	if (vsi->port_info &&
5865 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5866 	    vsi->netdev) {
5867 		ice_print_link_msg(vsi, true);
5868 		netif_tx_start_all_queues(vsi->netdev);
5869 		netif_carrier_on(vsi->netdev);
5870 		if (!ice_is_e810(&pf->hw))
5871 			ice_ptp_link_change(pf, pf->hw.pf_id, true);
5872 	}
5873 
5874 	/* clear this now, and the first stats read will be used as baseline */
5875 	vsi->stat_offsets_loaded = false;
5876 
5877 	ice_service_task_schedule(pf);
5878 
5879 	return 0;
5880 }
5881 
5882 /**
5883  * ice_up - Bring the connection back up after being down
5884  * @vsi: VSI being configured
5885  */
5886 int ice_up(struct ice_vsi *vsi)
5887 {
5888 	int err;
5889 
5890 	err = ice_vsi_cfg(vsi);
5891 	if (!err)
5892 		err = ice_up_complete(vsi);
5893 
5894 	return err;
5895 }
5896 
5897 /**
5898  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5899  * @syncp: pointer to u64_stats_sync
5900  * @stats: stats that pkts and bytes count will be taken from
5901  * @pkts: packets stats counter
5902  * @bytes: bytes stats counter
5903  *
5904  * This function fetches stats from the ring considering the atomic operations
5905  * that needs to be performed to read u64 values in 32 bit machine.
5906  */
5907 static void
5908 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
5909 			     u64 *pkts, u64 *bytes)
5910 {
5911 	unsigned int start;
5912 
5913 	do {
5914 		start = u64_stats_fetch_begin_irq(syncp);
5915 		*pkts = stats.pkts;
5916 		*bytes = stats.bytes;
5917 	} while (u64_stats_fetch_retry_irq(syncp, start));
5918 }
5919 
5920 /**
5921  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5922  * @vsi: the VSI to be updated
5923  * @vsi_stats: the stats struct to be updated
5924  * @rings: rings to work on
5925  * @count: number of rings
5926  */
5927 static void
5928 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
5929 			     struct rtnl_link_stats64 *vsi_stats,
5930 			     struct ice_tx_ring **rings, u16 count)
5931 {
5932 	u16 i;
5933 
5934 	for (i = 0; i < count; i++) {
5935 		struct ice_tx_ring *ring;
5936 		u64 pkts = 0, bytes = 0;
5937 
5938 		ring = READ_ONCE(rings[i]);
5939 		if (ring)
5940 			ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
5941 		vsi_stats->tx_packets += pkts;
5942 		vsi_stats->tx_bytes += bytes;
5943 		vsi->tx_restart += ring->tx_stats.restart_q;
5944 		vsi->tx_busy += ring->tx_stats.tx_busy;
5945 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5946 	}
5947 }
5948 
5949 /**
5950  * ice_update_vsi_ring_stats - Update VSI stats counters
5951  * @vsi: the VSI to be updated
5952  */
5953 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5954 {
5955 	struct rtnl_link_stats64 *vsi_stats;
5956 	u64 pkts, bytes;
5957 	int i;
5958 
5959 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
5960 	if (!vsi_stats)
5961 		return;
5962 
5963 	/* reset non-netdev (extended) stats */
5964 	vsi->tx_restart = 0;
5965 	vsi->tx_busy = 0;
5966 	vsi->tx_linearize = 0;
5967 	vsi->rx_buf_failed = 0;
5968 	vsi->rx_page_failed = 0;
5969 
5970 	rcu_read_lock();
5971 
5972 	/* update Tx rings counters */
5973 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
5974 				     vsi->num_txq);
5975 
5976 	/* update Rx rings counters */
5977 	ice_for_each_rxq(vsi, i) {
5978 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
5979 
5980 		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
5981 		vsi_stats->rx_packets += pkts;
5982 		vsi_stats->rx_bytes += bytes;
5983 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5984 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5985 	}
5986 
5987 	/* update XDP Tx rings counters */
5988 	if (ice_is_xdp_ena_vsi(vsi))
5989 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
5990 					     vsi->num_xdp_txq);
5991 
5992 	rcu_read_unlock();
5993 
5994 	vsi->net_stats.tx_packets = vsi_stats->tx_packets;
5995 	vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
5996 	vsi->net_stats.rx_packets = vsi_stats->rx_packets;
5997 	vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
5998 
5999 	kfree(vsi_stats);
6000 }
6001 
6002 /**
6003  * ice_update_vsi_stats - Update VSI stats counters
6004  * @vsi: the VSI to be updated
6005  */
6006 void ice_update_vsi_stats(struct ice_vsi *vsi)
6007 {
6008 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6009 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6010 	struct ice_pf *pf = vsi->back;
6011 
6012 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6013 	    test_bit(ICE_CFG_BUSY, pf->state))
6014 		return;
6015 
6016 	/* get stats as recorded by Tx/Rx rings */
6017 	ice_update_vsi_ring_stats(vsi);
6018 
6019 	/* get VSI stats as recorded by the hardware */
6020 	ice_update_eth_stats(vsi);
6021 
6022 	cur_ns->tx_errors = cur_es->tx_errors;
6023 	cur_ns->rx_dropped = cur_es->rx_discards;
6024 	cur_ns->tx_dropped = cur_es->tx_discards;
6025 	cur_ns->multicast = cur_es->rx_multicast;
6026 
6027 	/* update some more netdev stats if this is main VSI */
6028 	if (vsi->type == ICE_VSI_PF) {
6029 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6030 		cur_ns->rx_errors = pf->stats.crc_errors +
6031 				    pf->stats.illegal_bytes +
6032 				    pf->stats.rx_len_errors +
6033 				    pf->stats.rx_undersize +
6034 				    pf->hw_csum_rx_error +
6035 				    pf->stats.rx_jabber +
6036 				    pf->stats.rx_fragments +
6037 				    pf->stats.rx_oversize;
6038 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6039 		/* record drops from the port level */
6040 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6041 	}
6042 }
6043 
6044 /**
6045  * ice_update_pf_stats - Update PF port stats counters
6046  * @pf: PF whose stats needs to be updated
6047  */
6048 void ice_update_pf_stats(struct ice_pf *pf)
6049 {
6050 	struct ice_hw_port_stats *prev_ps, *cur_ps;
6051 	struct ice_hw *hw = &pf->hw;
6052 	u16 fd_ctr_base;
6053 	u8 port;
6054 
6055 	port = hw->port_info->lport;
6056 	prev_ps = &pf->stats_prev;
6057 	cur_ps = &pf->stats;
6058 
6059 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6060 			  &prev_ps->eth.rx_bytes,
6061 			  &cur_ps->eth.rx_bytes);
6062 
6063 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6064 			  &prev_ps->eth.rx_unicast,
6065 			  &cur_ps->eth.rx_unicast);
6066 
6067 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6068 			  &prev_ps->eth.rx_multicast,
6069 			  &cur_ps->eth.rx_multicast);
6070 
6071 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6072 			  &prev_ps->eth.rx_broadcast,
6073 			  &cur_ps->eth.rx_broadcast);
6074 
6075 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6076 			  &prev_ps->eth.rx_discards,
6077 			  &cur_ps->eth.rx_discards);
6078 
6079 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6080 			  &prev_ps->eth.tx_bytes,
6081 			  &cur_ps->eth.tx_bytes);
6082 
6083 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6084 			  &prev_ps->eth.tx_unicast,
6085 			  &cur_ps->eth.tx_unicast);
6086 
6087 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6088 			  &prev_ps->eth.tx_multicast,
6089 			  &cur_ps->eth.tx_multicast);
6090 
6091 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6092 			  &prev_ps->eth.tx_broadcast,
6093 			  &cur_ps->eth.tx_broadcast);
6094 
6095 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6096 			  &prev_ps->tx_dropped_link_down,
6097 			  &cur_ps->tx_dropped_link_down);
6098 
6099 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6100 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6101 
6102 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6103 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6104 
6105 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6106 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6107 
6108 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6109 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6110 
6111 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6112 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6113 
6114 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6115 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6116 
6117 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6118 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6119 
6120 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6121 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6122 
6123 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6124 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6125 
6126 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6127 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6128 
6129 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6130 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6131 
6132 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6133 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6134 
6135 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6136 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6137 
6138 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6139 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6140 
6141 	fd_ctr_base = hw->fd_ctr_base;
6142 
6143 	ice_stat_update40(hw,
6144 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6145 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6146 			  &cur_ps->fd_sb_match);
6147 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6148 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6149 
6150 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6151 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6152 
6153 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6154 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6155 
6156 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6157 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6158 
6159 	ice_update_dcb_stats(pf);
6160 
6161 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6162 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
6163 
6164 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6165 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6166 
6167 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6168 			  &prev_ps->mac_local_faults,
6169 			  &cur_ps->mac_local_faults);
6170 
6171 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6172 			  &prev_ps->mac_remote_faults,
6173 			  &cur_ps->mac_remote_faults);
6174 
6175 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6176 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6177 
6178 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6179 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6180 
6181 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6182 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6183 
6184 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6185 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6186 
6187 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6188 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6189 
6190 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6191 
6192 	pf->stat_prev_loaded = true;
6193 }
6194 
6195 /**
6196  * ice_get_stats64 - get statistics for network device structure
6197  * @netdev: network interface device structure
6198  * @stats: main device statistics structure
6199  */
6200 static
6201 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6202 {
6203 	struct ice_netdev_priv *np = netdev_priv(netdev);
6204 	struct rtnl_link_stats64 *vsi_stats;
6205 	struct ice_vsi *vsi = np->vsi;
6206 
6207 	vsi_stats = &vsi->net_stats;
6208 
6209 	if (!vsi->num_txq || !vsi->num_rxq)
6210 		return;
6211 
6212 	/* netdev packet/byte stats come from ring counter. These are obtained
6213 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6214 	 * But, only call the update routine and read the registers if VSI is
6215 	 * not down.
6216 	 */
6217 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6218 		ice_update_vsi_ring_stats(vsi);
6219 	stats->tx_packets = vsi_stats->tx_packets;
6220 	stats->tx_bytes = vsi_stats->tx_bytes;
6221 	stats->rx_packets = vsi_stats->rx_packets;
6222 	stats->rx_bytes = vsi_stats->rx_bytes;
6223 
6224 	/* The rest of the stats can be read from the hardware but instead we
6225 	 * just return values that the watchdog task has already obtained from
6226 	 * the hardware.
6227 	 */
6228 	stats->multicast = vsi_stats->multicast;
6229 	stats->tx_errors = vsi_stats->tx_errors;
6230 	stats->tx_dropped = vsi_stats->tx_dropped;
6231 	stats->rx_errors = vsi_stats->rx_errors;
6232 	stats->rx_dropped = vsi_stats->rx_dropped;
6233 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6234 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6235 }
6236 
6237 /**
6238  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6239  * @vsi: VSI having NAPI disabled
6240  */
6241 static void ice_napi_disable_all(struct ice_vsi *vsi)
6242 {
6243 	int q_idx;
6244 
6245 	if (!vsi->netdev)
6246 		return;
6247 
6248 	ice_for_each_q_vector(vsi, q_idx) {
6249 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6250 
6251 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6252 			napi_disable(&q_vector->napi);
6253 
6254 		cancel_work_sync(&q_vector->tx.dim.work);
6255 		cancel_work_sync(&q_vector->rx.dim.work);
6256 	}
6257 }
6258 
6259 /**
6260  * ice_down - Shutdown the connection
6261  * @vsi: The VSI being stopped
6262  *
6263  * Caller of this function is expected to set the vsi->state ICE_DOWN bit
6264  */
6265 int ice_down(struct ice_vsi *vsi)
6266 {
6267 	int i, tx_err, rx_err, link_err = 0;
6268 
6269 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6270 
6271 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6272 		if (!ice_is_e810(&vsi->back->hw))
6273 			ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6274 		netif_carrier_off(vsi->netdev);
6275 		netif_tx_disable(vsi->netdev);
6276 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6277 		ice_eswitch_stop_all_tx_queues(vsi->back);
6278 	}
6279 
6280 	ice_vsi_dis_irq(vsi);
6281 
6282 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6283 	if (tx_err)
6284 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6285 			   vsi->vsi_num, tx_err);
6286 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6287 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6288 		if (tx_err)
6289 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6290 				   vsi->vsi_num, tx_err);
6291 	}
6292 
6293 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6294 	if (rx_err)
6295 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6296 			   vsi->vsi_num, rx_err);
6297 
6298 	ice_napi_disable_all(vsi);
6299 
6300 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6301 		link_err = ice_force_phys_link_state(vsi, false);
6302 		if (link_err)
6303 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6304 				   vsi->vsi_num, link_err);
6305 	}
6306 
6307 	ice_for_each_txq(vsi, i)
6308 		ice_clean_tx_ring(vsi->tx_rings[i]);
6309 
6310 	ice_for_each_rxq(vsi, i)
6311 		ice_clean_rx_ring(vsi->rx_rings[i]);
6312 
6313 	if (tx_err || rx_err || link_err) {
6314 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6315 			   vsi->vsi_num, vsi->vsw->sw_id);
6316 		return -EIO;
6317 	}
6318 
6319 	return 0;
6320 }
6321 
6322 /**
6323  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6324  * @vsi: VSI having resources allocated
6325  *
6326  * Return 0 on success, negative on failure
6327  */
6328 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6329 {
6330 	int i, err = 0;
6331 
6332 	if (!vsi->num_txq) {
6333 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6334 			vsi->vsi_num);
6335 		return -EINVAL;
6336 	}
6337 
6338 	ice_for_each_txq(vsi, i) {
6339 		struct ice_tx_ring *ring = vsi->tx_rings[i];
6340 
6341 		if (!ring)
6342 			return -EINVAL;
6343 
6344 		if (vsi->netdev)
6345 			ring->netdev = vsi->netdev;
6346 		err = ice_setup_tx_ring(ring);
6347 		if (err)
6348 			break;
6349 	}
6350 
6351 	return err;
6352 }
6353 
6354 /**
6355  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6356  * @vsi: VSI having resources allocated
6357  *
6358  * Return 0 on success, negative on failure
6359  */
6360 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6361 {
6362 	int i, err = 0;
6363 
6364 	if (!vsi->num_rxq) {
6365 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6366 			vsi->vsi_num);
6367 		return -EINVAL;
6368 	}
6369 
6370 	ice_for_each_rxq(vsi, i) {
6371 		struct ice_rx_ring *ring = vsi->rx_rings[i];
6372 
6373 		if (!ring)
6374 			return -EINVAL;
6375 
6376 		if (vsi->netdev)
6377 			ring->netdev = vsi->netdev;
6378 		err = ice_setup_rx_ring(ring);
6379 		if (err)
6380 			break;
6381 	}
6382 
6383 	return err;
6384 }
6385 
6386 /**
6387  * ice_vsi_open_ctrl - open control VSI for use
6388  * @vsi: the VSI to open
6389  *
6390  * Initialization of the Control VSI
6391  *
6392  * Returns 0 on success, negative value on error
6393  */
6394 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6395 {
6396 	char int_name[ICE_INT_NAME_STR_LEN];
6397 	struct ice_pf *pf = vsi->back;
6398 	struct device *dev;
6399 	int err;
6400 
6401 	dev = ice_pf_to_dev(pf);
6402 	/* allocate descriptors */
6403 	err = ice_vsi_setup_tx_rings(vsi);
6404 	if (err)
6405 		goto err_setup_tx;
6406 
6407 	err = ice_vsi_setup_rx_rings(vsi);
6408 	if (err)
6409 		goto err_setup_rx;
6410 
6411 	err = ice_vsi_cfg(vsi);
6412 	if (err)
6413 		goto err_setup_rx;
6414 
6415 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6416 		 dev_driver_string(dev), dev_name(dev));
6417 	err = ice_vsi_req_irq_msix(vsi, int_name);
6418 	if (err)
6419 		goto err_setup_rx;
6420 
6421 	ice_vsi_cfg_msix(vsi);
6422 
6423 	err = ice_vsi_start_all_rx_rings(vsi);
6424 	if (err)
6425 		goto err_up_complete;
6426 
6427 	clear_bit(ICE_VSI_DOWN, vsi->state);
6428 	ice_vsi_ena_irq(vsi);
6429 
6430 	return 0;
6431 
6432 err_up_complete:
6433 	ice_down(vsi);
6434 err_setup_rx:
6435 	ice_vsi_free_rx_rings(vsi);
6436 err_setup_tx:
6437 	ice_vsi_free_tx_rings(vsi);
6438 
6439 	return err;
6440 }
6441 
6442 /**
6443  * ice_vsi_open - Called when a network interface is made active
6444  * @vsi: the VSI to open
6445  *
6446  * Initialization of the VSI
6447  *
6448  * Returns 0 on success, negative value on error
6449  */
6450 int ice_vsi_open(struct ice_vsi *vsi)
6451 {
6452 	char int_name[ICE_INT_NAME_STR_LEN];
6453 	struct ice_pf *pf = vsi->back;
6454 	int err;
6455 
6456 	/* allocate descriptors */
6457 	err = ice_vsi_setup_tx_rings(vsi);
6458 	if (err)
6459 		goto err_setup_tx;
6460 
6461 	err = ice_vsi_setup_rx_rings(vsi);
6462 	if (err)
6463 		goto err_setup_rx;
6464 
6465 	err = ice_vsi_cfg(vsi);
6466 	if (err)
6467 		goto err_setup_rx;
6468 
6469 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6470 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6471 	err = ice_vsi_req_irq_msix(vsi, int_name);
6472 	if (err)
6473 		goto err_setup_rx;
6474 
6475 	if (vsi->type == ICE_VSI_PF) {
6476 		/* Notify the stack of the actual queue counts. */
6477 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6478 		if (err)
6479 			goto err_set_qs;
6480 
6481 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6482 		if (err)
6483 			goto err_set_qs;
6484 	}
6485 
6486 	err = ice_up_complete(vsi);
6487 	if (err)
6488 		goto err_up_complete;
6489 
6490 	return 0;
6491 
6492 err_up_complete:
6493 	ice_down(vsi);
6494 err_set_qs:
6495 	ice_vsi_free_irq(vsi);
6496 err_setup_rx:
6497 	ice_vsi_free_rx_rings(vsi);
6498 err_setup_tx:
6499 	ice_vsi_free_tx_rings(vsi);
6500 
6501 	return err;
6502 }
6503 
6504 /**
6505  * ice_vsi_release_all - Delete all VSIs
6506  * @pf: PF from which all VSIs are being removed
6507  */
6508 static void ice_vsi_release_all(struct ice_pf *pf)
6509 {
6510 	int err, i;
6511 
6512 	if (!pf->vsi)
6513 		return;
6514 
6515 	ice_for_each_vsi(pf, i) {
6516 		if (!pf->vsi[i])
6517 			continue;
6518 
6519 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
6520 			continue;
6521 
6522 		err = ice_vsi_release(pf->vsi[i]);
6523 		if (err)
6524 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6525 				i, err, pf->vsi[i]->vsi_num);
6526 	}
6527 }
6528 
6529 /**
6530  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6531  * @pf: pointer to the PF instance
6532  * @type: VSI type to rebuild
6533  *
6534  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6535  */
6536 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6537 {
6538 	struct device *dev = ice_pf_to_dev(pf);
6539 	int i, err;
6540 
6541 	ice_for_each_vsi(pf, i) {
6542 		struct ice_vsi *vsi = pf->vsi[i];
6543 
6544 		if (!vsi || vsi->type != type)
6545 			continue;
6546 
6547 		/* rebuild the VSI */
6548 		err = ice_vsi_rebuild(vsi, true);
6549 		if (err) {
6550 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6551 				err, vsi->idx, ice_vsi_type_str(type));
6552 			return err;
6553 		}
6554 
6555 		/* replay filters for the VSI */
6556 		err = ice_replay_vsi(&pf->hw, vsi->idx);
6557 		if (err) {
6558 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
6559 				err, vsi->idx, ice_vsi_type_str(type));
6560 			return err;
6561 		}
6562 
6563 		/* Re-map HW VSI number, using VSI handle that has been
6564 		 * previously validated in ice_replay_vsi() call above
6565 		 */
6566 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6567 
6568 		/* enable the VSI */
6569 		err = ice_ena_vsi(vsi, false);
6570 		if (err) {
6571 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6572 				err, vsi->idx, ice_vsi_type_str(type));
6573 			return err;
6574 		}
6575 
6576 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6577 			 ice_vsi_type_str(type));
6578 	}
6579 
6580 	return 0;
6581 }
6582 
6583 /**
6584  * ice_update_pf_netdev_link - Update PF netdev link status
6585  * @pf: pointer to the PF instance
6586  */
6587 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6588 {
6589 	bool link_up;
6590 	int i;
6591 
6592 	ice_for_each_vsi(pf, i) {
6593 		struct ice_vsi *vsi = pf->vsi[i];
6594 
6595 		if (!vsi || vsi->type != ICE_VSI_PF)
6596 			return;
6597 
6598 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6599 		if (link_up) {
6600 			netif_carrier_on(pf->vsi[i]->netdev);
6601 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6602 		} else {
6603 			netif_carrier_off(pf->vsi[i]->netdev);
6604 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6605 		}
6606 	}
6607 }
6608 
6609 /**
6610  * ice_rebuild - rebuild after reset
6611  * @pf: PF to rebuild
6612  * @reset_type: type of reset
6613  *
6614  * Do not rebuild VF VSI in this flow because that is already handled via
6615  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6616  * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
6617  * to reset/rebuild all the VF VSI twice.
6618  */
6619 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6620 {
6621 	struct device *dev = ice_pf_to_dev(pf);
6622 	struct ice_hw *hw = &pf->hw;
6623 	int err;
6624 
6625 	if (test_bit(ICE_DOWN, pf->state))
6626 		goto clear_recovery;
6627 
6628 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6629 
6630 	if (reset_type == ICE_RESET_EMPR) {
6631 		/* If an EMP reset has occurred, any previously pending flash
6632 		 * update will have completed. We no longer know whether or
6633 		 * not the NVM update EMP reset is restricted.
6634 		 */
6635 		pf->fw_emp_reset_disabled = false;
6636 	}
6637 
6638 	err = ice_init_all_ctrlq(hw);
6639 	if (err) {
6640 		dev_err(dev, "control queues init failed %d\n", err);
6641 		goto err_init_ctrlq;
6642 	}
6643 
6644 	/* if DDP was previously loaded successfully */
6645 	if (!ice_is_safe_mode(pf)) {
6646 		/* reload the SW DB of filter tables */
6647 		if (reset_type == ICE_RESET_PFR)
6648 			ice_fill_blk_tbls(hw);
6649 		else
6650 			/* Reload DDP Package after CORER/GLOBR reset */
6651 			ice_load_pkg(NULL, pf);
6652 	}
6653 
6654 	err = ice_clear_pf_cfg(hw);
6655 	if (err) {
6656 		dev_err(dev, "clear PF configuration failed %d\n", err);
6657 		goto err_init_ctrlq;
6658 	}
6659 
6660 	if (pf->first_sw->dflt_vsi_ena)
6661 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6662 	/* clear the default VSI configuration if it exists */
6663 	pf->first_sw->dflt_vsi = NULL;
6664 	pf->first_sw->dflt_vsi_ena = false;
6665 
6666 	ice_clear_pxe_mode(hw);
6667 
6668 	err = ice_init_nvm(hw);
6669 	if (err) {
6670 		dev_err(dev, "ice_init_nvm failed %d\n", err);
6671 		goto err_init_ctrlq;
6672 	}
6673 
6674 	err = ice_get_caps(hw);
6675 	if (err) {
6676 		dev_err(dev, "ice_get_caps failed %d\n", err);
6677 		goto err_init_ctrlq;
6678 	}
6679 
6680 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6681 	if (err) {
6682 		dev_err(dev, "set_mac_cfg failed %d\n", err);
6683 		goto err_init_ctrlq;
6684 	}
6685 
6686 	err = ice_sched_init_port(hw->port_info);
6687 	if (err)
6688 		goto err_sched_init_port;
6689 
6690 	/* start misc vector */
6691 	err = ice_req_irq_msix_misc(pf);
6692 	if (err) {
6693 		dev_err(dev, "misc vector setup failed: %d\n", err);
6694 		goto err_sched_init_port;
6695 	}
6696 
6697 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6698 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6699 		if (!rd32(hw, PFQF_FD_SIZE)) {
6700 			u16 unused, guar, b_effort;
6701 
6702 			guar = hw->func_caps.fd_fltr_guar;
6703 			b_effort = hw->func_caps.fd_fltr_best_effort;
6704 
6705 			/* force guaranteed filter pool for PF */
6706 			ice_alloc_fd_guar_item(hw, &unused, guar);
6707 			/* force shared filter pool for PF */
6708 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6709 		}
6710 	}
6711 
6712 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6713 		ice_dcb_rebuild(pf);
6714 
6715 	/* If the PF previously had enabled PTP, PTP init needs to happen before
6716 	 * the VSI rebuild. If not, this causes the PTP link status events to
6717 	 * fail.
6718 	 */
6719 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6720 		ice_ptp_reset(pf);
6721 
6722 	/* rebuild PF VSI */
6723 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6724 	if (err) {
6725 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6726 		goto err_vsi_rebuild;
6727 	}
6728 
6729 	/* configure PTP timestamping after VSI rebuild */
6730 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6731 		ice_ptp_cfg_timestamp(pf, false);
6732 
6733 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
6734 	if (err) {
6735 		dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
6736 		goto err_vsi_rebuild;
6737 	}
6738 
6739 	if (reset_type == ICE_RESET_PFR) {
6740 		err = ice_rebuild_channels(pf);
6741 		if (err) {
6742 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
6743 				err);
6744 			goto err_vsi_rebuild;
6745 		}
6746 	}
6747 
6748 	/* If Flow Director is active */
6749 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6750 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6751 		if (err) {
6752 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6753 			goto err_vsi_rebuild;
6754 		}
6755 
6756 		/* replay HW Flow Director recipes */
6757 		if (hw->fdir_prof)
6758 			ice_fdir_replay_flows(hw);
6759 
6760 		/* replay Flow Director filters */
6761 		ice_fdir_replay_fltrs(pf);
6762 
6763 		ice_rebuild_arfs(pf);
6764 	}
6765 
6766 	ice_update_pf_netdev_link(pf);
6767 
6768 	/* tell the firmware we are up */
6769 	err = ice_send_version(pf);
6770 	if (err) {
6771 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
6772 			err);
6773 		goto err_vsi_rebuild;
6774 	}
6775 
6776 	ice_replay_post(hw);
6777 
6778 	/* if we get here, reset flow is successful */
6779 	clear_bit(ICE_RESET_FAILED, pf->state);
6780 
6781 	ice_plug_aux_dev(pf);
6782 	return;
6783 
6784 err_vsi_rebuild:
6785 err_sched_init_port:
6786 	ice_sched_cleanup_all(hw);
6787 err_init_ctrlq:
6788 	ice_shutdown_all_ctrlq(hw);
6789 	set_bit(ICE_RESET_FAILED, pf->state);
6790 clear_recovery:
6791 	/* set this bit in PF state to control service task scheduling */
6792 	set_bit(ICE_NEEDS_RESTART, pf->state);
6793 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6794 }
6795 
6796 /**
6797  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6798  * @vsi: Pointer to VSI structure
6799  */
6800 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6801 {
6802 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6803 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6804 	else
6805 		return ICE_RXBUF_3072;
6806 }
6807 
6808 /**
6809  * ice_change_mtu - NDO callback to change the MTU
6810  * @netdev: network interface device structure
6811  * @new_mtu: new value for maximum frame size
6812  *
6813  * Returns 0 on success, negative on failure
6814  */
6815 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6816 {
6817 	struct ice_netdev_priv *np = netdev_priv(netdev);
6818 	struct ice_vsi *vsi = np->vsi;
6819 	struct ice_pf *pf = vsi->back;
6820 	struct iidc_event *event;
6821 	u8 count = 0;
6822 	int err = 0;
6823 
6824 	if (new_mtu == (int)netdev->mtu) {
6825 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6826 		return 0;
6827 	}
6828 
6829 	if (ice_is_xdp_ena_vsi(vsi)) {
6830 		int frame_size = ice_max_xdp_frame_size(vsi);
6831 
6832 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6833 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6834 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6835 			return -EINVAL;
6836 		}
6837 	}
6838 
6839 	/* if a reset is in progress, wait for some time for it to complete */
6840 	do {
6841 		if (ice_is_reset_in_progress(pf->state)) {
6842 			count++;
6843 			usleep_range(1000, 2000);
6844 		} else {
6845 			break;
6846 		}
6847 
6848 	} while (count < 100);
6849 
6850 	if (count == 100) {
6851 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6852 		return -EBUSY;
6853 	}
6854 
6855 	event = kzalloc(sizeof(*event), GFP_KERNEL);
6856 	if (!event)
6857 		return -ENOMEM;
6858 
6859 	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6860 	ice_send_event_to_aux(pf, event);
6861 	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6862 
6863 	netdev->mtu = (unsigned int)new_mtu;
6864 
6865 	/* if VSI is up, bring it down and then back up */
6866 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6867 		err = ice_down(vsi);
6868 		if (err) {
6869 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6870 			goto event_after;
6871 		}
6872 
6873 		err = ice_up(vsi);
6874 		if (err) {
6875 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6876 			goto event_after;
6877 		}
6878 	}
6879 
6880 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6881 event_after:
6882 	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6883 	ice_send_event_to_aux(pf, event);
6884 	kfree(event);
6885 
6886 	return err;
6887 }
6888 
6889 /**
6890  * ice_eth_ioctl - Access the hwtstamp interface
6891  * @netdev: network interface device structure
6892  * @ifr: interface request data
6893  * @cmd: ioctl command
6894  */
6895 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6896 {
6897 	struct ice_netdev_priv *np = netdev_priv(netdev);
6898 	struct ice_pf *pf = np->vsi->back;
6899 
6900 	switch (cmd) {
6901 	case SIOCGHWTSTAMP:
6902 		return ice_ptp_get_ts_config(pf, ifr);
6903 	case SIOCSHWTSTAMP:
6904 		return ice_ptp_set_ts_config(pf, ifr);
6905 	default:
6906 		return -EOPNOTSUPP;
6907 	}
6908 }
6909 
6910 /**
6911  * ice_aq_str - convert AQ err code to a string
6912  * @aq_err: the AQ error code to convert
6913  */
6914 const char *ice_aq_str(enum ice_aq_err aq_err)
6915 {
6916 	switch (aq_err) {
6917 	case ICE_AQ_RC_OK:
6918 		return "OK";
6919 	case ICE_AQ_RC_EPERM:
6920 		return "ICE_AQ_RC_EPERM";
6921 	case ICE_AQ_RC_ENOENT:
6922 		return "ICE_AQ_RC_ENOENT";
6923 	case ICE_AQ_RC_ENOMEM:
6924 		return "ICE_AQ_RC_ENOMEM";
6925 	case ICE_AQ_RC_EBUSY:
6926 		return "ICE_AQ_RC_EBUSY";
6927 	case ICE_AQ_RC_EEXIST:
6928 		return "ICE_AQ_RC_EEXIST";
6929 	case ICE_AQ_RC_EINVAL:
6930 		return "ICE_AQ_RC_EINVAL";
6931 	case ICE_AQ_RC_ENOSPC:
6932 		return "ICE_AQ_RC_ENOSPC";
6933 	case ICE_AQ_RC_ENOSYS:
6934 		return "ICE_AQ_RC_ENOSYS";
6935 	case ICE_AQ_RC_EMODE:
6936 		return "ICE_AQ_RC_EMODE";
6937 	case ICE_AQ_RC_ENOSEC:
6938 		return "ICE_AQ_RC_ENOSEC";
6939 	case ICE_AQ_RC_EBADSIG:
6940 		return "ICE_AQ_RC_EBADSIG";
6941 	case ICE_AQ_RC_ESVN:
6942 		return "ICE_AQ_RC_ESVN";
6943 	case ICE_AQ_RC_EBADMAN:
6944 		return "ICE_AQ_RC_EBADMAN";
6945 	case ICE_AQ_RC_EBADBUF:
6946 		return "ICE_AQ_RC_EBADBUF";
6947 	}
6948 
6949 	return "ICE_AQ_RC_UNKNOWN";
6950 }
6951 
6952 /**
6953  * ice_set_rss_lut - Set RSS LUT
6954  * @vsi: Pointer to VSI structure
6955  * @lut: Lookup table
6956  * @lut_size: Lookup table size
6957  *
6958  * Returns 0 on success, negative on failure
6959  */
6960 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6961 {
6962 	struct ice_aq_get_set_rss_lut_params params = {};
6963 	struct ice_hw *hw = &vsi->back->hw;
6964 	int status;
6965 
6966 	if (!lut)
6967 		return -EINVAL;
6968 
6969 	params.vsi_handle = vsi->idx;
6970 	params.lut_size = lut_size;
6971 	params.lut_type = vsi->rss_lut_type;
6972 	params.lut = lut;
6973 
6974 	status = ice_aq_set_rss_lut(hw, &params);
6975 	if (status)
6976 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
6977 			status, ice_aq_str(hw->adminq.sq_last_status));
6978 
6979 	return status;
6980 }
6981 
6982 /**
6983  * ice_set_rss_key - Set RSS key
6984  * @vsi: Pointer to the VSI structure
6985  * @seed: RSS hash seed
6986  *
6987  * Returns 0 on success, negative on failure
6988  */
6989 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6990 {
6991 	struct ice_hw *hw = &vsi->back->hw;
6992 	int status;
6993 
6994 	if (!seed)
6995 		return -EINVAL;
6996 
6997 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6998 	if (status)
6999 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7000 			status, ice_aq_str(hw->adminq.sq_last_status));
7001 
7002 	return status;
7003 }
7004 
7005 /**
7006  * ice_get_rss_lut - Get RSS LUT
7007  * @vsi: Pointer to VSI structure
7008  * @lut: Buffer to store the lookup table entries
7009  * @lut_size: Size of buffer to store the lookup table entries
7010  *
7011  * Returns 0 on success, negative on failure
7012  */
7013 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7014 {
7015 	struct ice_aq_get_set_rss_lut_params params = {};
7016 	struct ice_hw *hw = &vsi->back->hw;
7017 	int status;
7018 
7019 	if (!lut)
7020 		return -EINVAL;
7021 
7022 	params.vsi_handle = vsi->idx;
7023 	params.lut_size = lut_size;
7024 	params.lut_type = vsi->rss_lut_type;
7025 	params.lut = lut;
7026 
7027 	status = ice_aq_get_rss_lut(hw, &params);
7028 	if (status)
7029 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7030 			status, ice_aq_str(hw->adminq.sq_last_status));
7031 
7032 	return status;
7033 }
7034 
7035 /**
7036  * ice_get_rss_key - Get RSS key
7037  * @vsi: Pointer to VSI structure
7038  * @seed: Buffer to store the key in
7039  *
7040  * Returns 0 on success, negative on failure
7041  */
7042 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7043 {
7044 	struct ice_hw *hw = &vsi->back->hw;
7045 	int status;
7046 
7047 	if (!seed)
7048 		return -EINVAL;
7049 
7050 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7051 	if (status)
7052 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7053 			status, ice_aq_str(hw->adminq.sq_last_status));
7054 
7055 	return status;
7056 }
7057 
7058 /**
7059  * ice_bridge_getlink - Get the hardware bridge mode
7060  * @skb: skb buff
7061  * @pid: process ID
7062  * @seq: RTNL message seq
7063  * @dev: the netdev being configured
7064  * @filter_mask: filter mask passed in
7065  * @nlflags: netlink flags passed in
7066  *
7067  * Return the bridge mode (VEB/VEPA)
7068  */
7069 static int
7070 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7071 		   struct net_device *dev, u32 filter_mask, int nlflags)
7072 {
7073 	struct ice_netdev_priv *np = netdev_priv(dev);
7074 	struct ice_vsi *vsi = np->vsi;
7075 	struct ice_pf *pf = vsi->back;
7076 	u16 bmode;
7077 
7078 	bmode = pf->first_sw->bridge_mode;
7079 
7080 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7081 				       filter_mask, NULL);
7082 }
7083 
7084 /**
7085  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7086  * @vsi: Pointer to VSI structure
7087  * @bmode: Hardware bridge mode (VEB/VEPA)
7088  *
7089  * Returns 0 on success, negative on failure
7090  */
7091 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7092 {
7093 	struct ice_aqc_vsi_props *vsi_props;
7094 	struct ice_hw *hw = &vsi->back->hw;
7095 	struct ice_vsi_ctx *ctxt;
7096 	int ret;
7097 
7098 	vsi_props = &vsi->info;
7099 
7100 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7101 	if (!ctxt)
7102 		return -ENOMEM;
7103 
7104 	ctxt->info = vsi->info;
7105 
7106 	if (bmode == BRIDGE_MODE_VEB)
7107 		/* change from VEPA to VEB mode */
7108 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7109 	else
7110 		/* change from VEB to VEPA mode */
7111 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7112 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7113 
7114 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7115 	if (ret) {
7116 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7117 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7118 		goto out;
7119 	}
7120 	/* Update sw flags for book keeping */
7121 	vsi_props->sw_flags = ctxt->info.sw_flags;
7122 
7123 out:
7124 	kfree(ctxt);
7125 	return ret;
7126 }
7127 
7128 /**
7129  * ice_bridge_setlink - Set the hardware bridge mode
7130  * @dev: the netdev being configured
7131  * @nlh: RTNL message
7132  * @flags: bridge setlink flags
7133  * @extack: netlink extended ack
7134  *
7135  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7136  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
7137  * not already set for all VSIs connected to this switch. And also update the
7138  * unicast switch filter rules for the corresponding switch of the netdev.
7139  */
7140 static int
7141 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7142 		   u16 __always_unused flags,
7143 		   struct netlink_ext_ack __always_unused *extack)
7144 {
7145 	struct ice_netdev_priv *np = netdev_priv(dev);
7146 	struct ice_pf *pf = np->vsi->back;
7147 	struct nlattr *attr, *br_spec;
7148 	struct ice_hw *hw = &pf->hw;
7149 	struct ice_sw *pf_sw;
7150 	int rem, v, err = 0;
7151 
7152 	pf_sw = pf->first_sw;
7153 	/* find the attribute in the netlink message */
7154 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7155 
7156 	nla_for_each_nested(attr, br_spec, rem) {
7157 		__u16 mode;
7158 
7159 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7160 			continue;
7161 		mode = nla_get_u16(attr);
7162 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7163 			return -EINVAL;
7164 		/* Continue  if bridge mode is not being flipped */
7165 		if (mode == pf_sw->bridge_mode)
7166 			continue;
7167 		/* Iterates through the PF VSI list and update the loopback
7168 		 * mode of the VSI
7169 		 */
7170 		ice_for_each_vsi(pf, v) {
7171 			if (!pf->vsi[v])
7172 				continue;
7173 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7174 			if (err)
7175 				return err;
7176 		}
7177 
7178 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7179 		/* Update the unicast switch filter rules for the corresponding
7180 		 * switch of the netdev
7181 		 */
7182 		err = ice_update_sw_rule_bridge_mode(hw);
7183 		if (err) {
7184 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7185 				   mode, err,
7186 				   ice_aq_str(hw->adminq.sq_last_status));
7187 			/* revert hw->evb_veb */
7188 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7189 			return err;
7190 		}
7191 
7192 		pf_sw->bridge_mode = mode;
7193 	}
7194 
7195 	return 0;
7196 }
7197 
7198 /**
7199  * ice_tx_timeout - Respond to a Tx Hang
7200  * @netdev: network interface device structure
7201  * @txqueue: Tx queue
7202  */
7203 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7204 {
7205 	struct ice_netdev_priv *np = netdev_priv(netdev);
7206 	struct ice_tx_ring *tx_ring = NULL;
7207 	struct ice_vsi *vsi = np->vsi;
7208 	struct ice_pf *pf = vsi->back;
7209 	u32 i;
7210 
7211 	pf->tx_timeout_count++;
7212 
7213 	/* Check if PFC is enabled for the TC to which the queue belongs
7214 	 * to. If yes then Tx timeout is not caused by a hung queue, no
7215 	 * need to reset and rebuild
7216 	 */
7217 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7218 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7219 			 txqueue);
7220 		return;
7221 	}
7222 
7223 	/* now that we have an index, find the tx_ring struct */
7224 	ice_for_each_txq(vsi, i)
7225 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7226 			if (txqueue == vsi->tx_rings[i]->q_index) {
7227 				tx_ring = vsi->tx_rings[i];
7228 				break;
7229 			}
7230 
7231 	/* Reset recovery level if enough time has elapsed after last timeout.
7232 	 * Also ensure no new reset action happens before next timeout period.
7233 	 */
7234 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7235 		pf->tx_timeout_recovery_level = 1;
7236 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7237 				       netdev->watchdog_timeo)))
7238 		return;
7239 
7240 	if (tx_ring) {
7241 		struct ice_hw *hw = &pf->hw;
7242 		u32 head, val = 0;
7243 
7244 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7245 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7246 		/* Read interrupt register */
7247 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7248 
7249 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7250 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7251 			    head, tx_ring->next_to_use, val);
7252 	}
7253 
7254 	pf->tx_timeout_last_recovery = jiffies;
7255 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7256 		    pf->tx_timeout_recovery_level, txqueue);
7257 
7258 	switch (pf->tx_timeout_recovery_level) {
7259 	case 1:
7260 		set_bit(ICE_PFR_REQ, pf->state);
7261 		break;
7262 	case 2:
7263 		set_bit(ICE_CORER_REQ, pf->state);
7264 		break;
7265 	case 3:
7266 		set_bit(ICE_GLOBR_REQ, pf->state);
7267 		break;
7268 	default:
7269 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7270 		set_bit(ICE_DOWN, pf->state);
7271 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7272 		set_bit(ICE_SERVICE_DIS, pf->state);
7273 		break;
7274 	}
7275 
7276 	ice_service_task_schedule(pf);
7277 	pf->tx_timeout_recovery_level++;
7278 }
7279 
7280 /**
7281  * ice_setup_tc_cls_flower - flower classifier offloads
7282  * @np: net device to configure
7283  * @filter_dev: device on which filter is added
7284  * @cls_flower: offload data
7285  */
7286 static int
7287 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7288 			struct net_device *filter_dev,
7289 			struct flow_cls_offload *cls_flower)
7290 {
7291 	struct ice_vsi *vsi = np->vsi;
7292 
7293 	if (cls_flower->common.chain_index)
7294 		return -EOPNOTSUPP;
7295 
7296 	switch (cls_flower->command) {
7297 	case FLOW_CLS_REPLACE:
7298 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7299 	case FLOW_CLS_DESTROY:
7300 		return ice_del_cls_flower(vsi, cls_flower);
7301 	default:
7302 		return -EINVAL;
7303 	}
7304 }
7305 
7306 /**
7307  * ice_setup_tc_block_cb - callback handler registered for TC block
7308  * @type: TC SETUP type
7309  * @type_data: TC flower offload data that contains user input
7310  * @cb_priv: netdev private data
7311  */
7312 static int
7313 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7314 {
7315 	struct ice_netdev_priv *np = cb_priv;
7316 
7317 	switch (type) {
7318 	case TC_SETUP_CLSFLOWER:
7319 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7320 					       type_data);
7321 	default:
7322 		return -EOPNOTSUPP;
7323 	}
7324 }
7325 
7326 /**
7327  * ice_validate_mqprio_qopt - Validate TCF input parameters
7328  * @vsi: Pointer to VSI
7329  * @mqprio_qopt: input parameters for mqprio queue configuration
7330  *
7331  * This function validates MQPRIO params, such as qcount (power of 2 wherever
7332  * needed), and make sure user doesn't specify qcount and BW rate limit
7333  * for TCs, which are more than "num_tc"
7334  */
7335 static int
7336 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7337 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
7338 {
7339 	u64 sum_max_rate = 0, sum_min_rate = 0;
7340 	int non_power_of_2_qcount = 0;
7341 	struct ice_pf *pf = vsi->back;
7342 	int max_rss_q_cnt = 0;
7343 	struct device *dev;
7344 	int i, speed;
7345 	u8 num_tc;
7346 
7347 	if (vsi->type != ICE_VSI_PF)
7348 		return -EINVAL;
7349 
7350 	if (mqprio_qopt->qopt.offset[0] != 0 ||
7351 	    mqprio_qopt->qopt.num_tc < 1 ||
7352 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7353 		return -EINVAL;
7354 
7355 	dev = ice_pf_to_dev(pf);
7356 	vsi->ch_rss_size = 0;
7357 	num_tc = mqprio_qopt->qopt.num_tc;
7358 
7359 	for (i = 0; num_tc; i++) {
7360 		int qcount = mqprio_qopt->qopt.count[i];
7361 		u64 max_rate, min_rate, rem;
7362 
7363 		if (!qcount)
7364 			return -EINVAL;
7365 
7366 		if (is_power_of_2(qcount)) {
7367 			if (non_power_of_2_qcount &&
7368 			    qcount > non_power_of_2_qcount) {
7369 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7370 					qcount, non_power_of_2_qcount);
7371 				return -EINVAL;
7372 			}
7373 			if (qcount > max_rss_q_cnt)
7374 				max_rss_q_cnt = qcount;
7375 		} else {
7376 			if (non_power_of_2_qcount &&
7377 			    qcount != non_power_of_2_qcount) {
7378 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7379 					qcount, non_power_of_2_qcount);
7380 				return -EINVAL;
7381 			}
7382 			if (qcount < max_rss_q_cnt) {
7383 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7384 					qcount, max_rss_q_cnt);
7385 				return -EINVAL;
7386 			}
7387 			max_rss_q_cnt = qcount;
7388 			non_power_of_2_qcount = qcount;
7389 		}
7390 
7391 		/* TC command takes input in K/N/Gbps or K/M/Gbit etc but
7392 		 * converts the bandwidth rate limit into Bytes/s when
7393 		 * passing it down to the driver. So convert input bandwidth
7394 		 * from Bytes/s to Kbps
7395 		 */
7396 		max_rate = mqprio_qopt->max_rate[i];
7397 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7398 		sum_max_rate += max_rate;
7399 
7400 		/* min_rate is minimum guaranteed rate and it can't be zero */
7401 		min_rate = mqprio_qopt->min_rate[i];
7402 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7403 		sum_min_rate += min_rate;
7404 
7405 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7406 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7407 				min_rate, ICE_MIN_BW_LIMIT);
7408 			return -EINVAL;
7409 		}
7410 
7411 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7412 		if (rem) {
7413 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7414 				i, ICE_MIN_BW_LIMIT);
7415 			return -EINVAL;
7416 		}
7417 
7418 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7419 		if (rem) {
7420 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7421 				i, ICE_MIN_BW_LIMIT);
7422 			return -EINVAL;
7423 		}
7424 
7425 		/* min_rate can't be more than max_rate, except when max_rate
7426 		 * is zero (implies max_rate sought is max line rate). In such
7427 		 * a case min_rate can be more than max.
7428 		 */
7429 		if (max_rate && min_rate > max_rate) {
7430 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7431 				min_rate, max_rate);
7432 			return -EINVAL;
7433 		}
7434 
7435 		if (i >= mqprio_qopt->qopt.num_tc - 1)
7436 			break;
7437 		if (mqprio_qopt->qopt.offset[i + 1] !=
7438 		    (mqprio_qopt->qopt.offset[i] + qcount))
7439 			return -EINVAL;
7440 	}
7441 	if (vsi->num_rxq <
7442 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7443 		return -EINVAL;
7444 	if (vsi->num_txq <
7445 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7446 		return -EINVAL;
7447 
7448 	speed = ice_get_link_speed_kbps(vsi);
7449 	if (sum_max_rate && sum_max_rate > (u64)speed) {
7450 		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
7451 			sum_max_rate, speed);
7452 		return -EINVAL;
7453 	}
7454 	if (sum_min_rate && sum_min_rate > (u64)speed) {
7455 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
7456 			sum_min_rate, speed);
7457 		return -EINVAL;
7458 	}
7459 
7460 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
7461 	vsi->ch_rss_size = max_rss_q_cnt;
7462 
7463 	return 0;
7464 }
7465 
7466 /**
7467  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
7468  * @pf: ptr to PF device
7469  * @vsi: ptr to VSI
7470  */
7471 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
7472 {
7473 	struct device *dev = ice_pf_to_dev(pf);
7474 	bool added = false;
7475 	struct ice_hw *hw;
7476 	int flow;
7477 
7478 	if (!(vsi->num_gfltr || vsi->num_bfltr))
7479 		return -EINVAL;
7480 
7481 	hw = &pf->hw;
7482 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
7483 		struct ice_fd_hw_prof *prof;
7484 		int tun, status;
7485 		u64 entry_h;
7486 
7487 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
7488 		      hw->fdir_prof[flow]->cnt))
7489 			continue;
7490 
7491 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
7492 			enum ice_flow_priority prio;
7493 			u64 prof_id;
7494 
7495 			/* add this VSI to FDir profile for this flow */
7496 			prio = ICE_FLOW_PRIO_NORMAL;
7497 			prof = hw->fdir_prof[flow];
7498 			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
7499 			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
7500 						    prof->vsi_h[0], vsi->idx,
7501 						    prio, prof->fdir_seg[tun],
7502 						    &entry_h);
7503 			if (status) {
7504 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
7505 					vsi->idx, flow);
7506 				continue;
7507 			}
7508 
7509 			prof->entry_h[prof->cnt][tun] = entry_h;
7510 		}
7511 
7512 		/* store VSI for filter replay and delete */
7513 		prof->vsi_h[prof->cnt] = vsi->idx;
7514 		prof->cnt++;
7515 
7516 		added = true;
7517 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
7518 			flow);
7519 	}
7520 
7521 	if (!added)
7522 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
7523 
7524 	return 0;
7525 }
7526 
7527 /**
7528  * ice_add_channel - add a channel by adding VSI
7529  * @pf: ptr to PF device
7530  * @sw_id: underlying HW switching element ID
7531  * @ch: ptr to channel structure
7532  *
7533  * Add a channel (VSI) using add_vsi and queue_map
7534  */
7535 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
7536 {
7537 	struct device *dev = ice_pf_to_dev(pf);
7538 	struct ice_vsi *vsi;
7539 
7540 	if (ch->type != ICE_VSI_CHNL) {
7541 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
7542 		return -EINVAL;
7543 	}
7544 
7545 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
7546 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
7547 		dev_err(dev, "create chnl VSI failure\n");
7548 		return -EINVAL;
7549 	}
7550 
7551 	ice_add_vsi_to_fdir(pf, vsi);
7552 
7553 	ch->sw_id = sw_id;
7554 	ch->vsi_num = vsi->vsi_num;
7555 	ch->info.mapping_flags = vsi->info.mapping_flags;
7556 	ch->ch_vsi = vsi;
7557 	/* set the back pointer of channel for newly created VSI */
7558 	vsi->ch = ch;
7559 
7560 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
7561 	       sizeof(vsi->info.q_mapping));
7562 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
7563 	       sizeof(vsi->info.tc_mapping));
7564 
7565 	return 0;
7566 }
7567 
7568 /**
7569  * ice_chnl_cfg_res
7570  * @vsi: the VSI being setup
7571  * @ch: ptr to channel structure
7572  *
7573  * Configure channel specific resources such as rings, vector.
7574  */
7575 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
7576 {
7577 	int i;
7578 
7579 	for (i = 0; i < ch->num_txq; i++) {
7580 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
7581 		struct ice_ring_container *rc;
7582 		struct ice_tx_ring *tx_ring;
7583 		struct ice_rx_ring *rx_ring;
7584 
7585 		tx_ring = vsi->tx_rings[ch->base_q + i];
7586 		rx_ring = vsi->rx_rings[ch->base_q + i];
7587 		if (!tx_ring || !rx_ring)
7588 			continue;
7589 
7590 		/* setup ring being channel enabled */
7591 		tx_ring->ch = ch;
7592 		rx_ring->ch = ch;
7593 
7594 		/* following code block sets up vector specific attributes */
7595 		tx_q_vector = tx_ring->q_vector;
7596 		rx_q_vector = rx_ring->q_vector;
7597 		if (!tx_q_vector && !rx_q_vector)
7598 			continue;
7599 
7600 		if (tx_q_vector) {
7601 			tx_q_vector->ch = ch;
7602 			/* setup Tx and Rx ITR setting if DIM is off */
7603 			rc = &tx_q_vector->tx;
7604 			if (!ITR_IS_DYNAMIC(rc))
7605 				ice_write_itr(rc, rc->itr_setting);
7606 		}
7607 		if (rx_q_vector) {
7608 			rx_q_vector->ch = ch;
7609 			/* setup Tx and Rx ITR setting if DIM is off */
7610 			rc = &rx_q_vector->rx;
7611 			if (!ITR_IS_DYNAMIC(rc))
7612 				ice_write_itr(rc, rc->itr_setting);
7613 		}
7614 	}
7615 
7616 	/* it is safe to assume that, if channel has non-zero num_t[r]xq, then
7617 	 * GLINT_ITR register would have written to perform in-context
7618 	 * update, hence perform flush
7619 	 */
7620 	if (ch->num_txq || ch->num_rxq)
7621 		ice_flush(&vsi->back->hw);
7622 }
7623 
7624 /**
7625  * ice_cfg_chnl_all_res - configure channel resources
7626  * @vsi: pte to main_vsi
7627  * @ch: ptr to channel structure
7628  *
7629  * This function configures channel specific resources such as flow-director
7630  * counter index, and other resources such as queues, vectors, ITR settings
7631  */
7632 static void
7633 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
7634 {
7635 	/* configure channel (aka ADQ) resources such as queues, vectors,
7636 	 * ITR settings for channel specific vectors and anything else
7637 	 */
7638 	ice_chnl_cfg_res(vsi, ch);
7639 }
7640 
7641 /**
7642  * ice_setup_hw_channel - setup new channel
7643  * @pf: ptr to PF device
7644  * @vsi: the VSI being setup
7645  * @ch: ptr to channel structure
7646  * @sw_id: underlying HW switching element ID
7647  * @type: type of channel to be created (VMDq2/VF)
7648  *
7649  * Setup new channel (VSI) based on specified type (VMDq2/VF)
7650  * and configures Tx rings accordingly
7651  */
7652 static int
7653 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
7654 		     struct ice_channel *ch, u16 sw_id, u8 type)
7655 {
7656 	struct device *dev = ice_pf_to_dev(pf);
7657 	int ret;
7658 
7659 	ch->base_q = vsi->next_base_q;
7660 	ch->type = type;
7661 
7662 	ret = ice_add_channel(pf, sw_id, ch);
7663 	if (ret) {
7664 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
7665 		return ret;
7666 	}
7667 
7668 	/* configure/setup ADQ specific resources */
7669 	ice_cfg_chnl_all_res(vsi, ch);
7670 
7671 	/* make sure to update the next_base_q so that subsequent channel's
7672 	 * (aka ADQ) VSI queue map is correct
7673 	 */
7674 	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
7675 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
7676 		ch->num_rxq);
7677 
7678 	return 0;
7679 }
7680 
7681 /**
7682  * ice_setup_channel - setup new channel using uplink element
7683  * @pf: ptr to PF device
7684  * @vsi: the VSI being setup
7685  * @ch: ptr to channel structure
7686  *
7687  * Setup new channel (VSI) based on specified type (VMDq2/VF)
7688  * and uplink switching element
7689  */
7690 static bool
7691 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
7692 		  struct ice_channel *ch)
7693 {
7694 	struct device *dev = ice_pf_to_dev(pf);
7695 	u16 sw_id;
7696 	int ret;
7697 
7698 	if (vsi->type != ICE_VSI_PF) {
7699 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
7700 		return false;
7701 	}
7702 
7703 	sw_id = pf->first_sw->sw_id;
7704 
7705 	/* create channel (VSI) */
7706 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
7707 	if (ret) {
7708 		dev_err(dev, "failed to setup hw_channel\n");
7709 		return false;
7710 	}
7711 	dev_dbg(dev, "successfully created channel()\n");
7712 
7713 	return ch->ch_vsi ? true : false;
7714 }
7715 
7716 /**
7717  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
7718  * @vsi: VSI to be configured
7719  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
7720  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
7721  */
7722 static int
7723 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
7724 {
7725 	int err;
7726 
7727 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
7728 	if (err)
7729 		return err;
7730 
7731 	return ice_set_max_bw_limit(vsi, max_tx_rate);
7732 }
7733 
7734 /**
7735  * ice_create_q_channel - function to create channel
7736  * @vsi: VSI to be configured
7737  * @ch: ptr to channel (it contains channel specific params)
7738  *
7739  * This function creates channel (VSI) using num_queues specified by user,
7740  * reconfigs RSS if needed.
7741  */
7742 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
7743 {
7744 	struct ice_pf *pf = vsi->back;
7745 	struct device *dev;
7746 
7747 	if (!ch)
7748 		return -EINVAL;
7749 
7750 	dev = ice_pf_to_dev(pf);
7751 	if (!ch->num_txq || !ch->num_rxq) {
7752 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
7753 		return -EINVAL;
7754 	}
7755 
7756 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
7757 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
7758 			vsi->cnt_q_avail, ch->num_txq);
7759 		return -EINVAL;
7760 	}
7761 
7762 	if (!ice_setup_channel(pf, vsi, ch)) {
7763 		dev_info(dev, "Failed to setup channel\n");
7764 		return -EINVAL;
7765 	}
7766 	/* configure BW rate limit */
7767 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
7768 		int ret;
7769 
7770 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
7771 				       ch->min_tx_rate);
7772 		if (ret)
7773 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
7774 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
7775 		else
7776 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
7777 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
7778 	}
7779 
7780 	vsi->cnt_q_avail -= ch->num_txq;
7781 
7782 	return 0;
7783 }
7784 
7785 /**
7786  * ice_rem_all_chnl_fltrs - removes all channel filters
7787  * @pf: ptr to PF, TC-flower based filter are tracked at PF level
7788  *
7789  * Remove all advanced switch filters only if they are channel specific
7790  * tc-flower based filter
7791  */
7792 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
7793 {
7794 	struct ice_tc_flower_fltr *fltr;
7795 	struct hlist_node *node;
7796 
7797 	/* to remove all channel filters, iterate an ordered list of filters */
7798 	hlist_for_each_entry_safe(fltr, node,
7799 				  &pf->tc_flower_fltr_list,
7800 				  tc_flower_node) {
7801 		struct ice_rule_query_data rule;
7802 		int status;
7803 
7804 		/* for now process only channel specific filters */
7805 		if (!ice_is_chnl_fltr(fltr))
7806 			continue;
7807 
7808 		rule.rid = fltr->rid;
7809 		rule.rule_id = fltr->rule_id;
7810 		rule.vsi_handle = fltr->dest_id;
7811 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
7812 		if (status) {
7813 			if (status == -ENOENT)
7814 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
7815 					rule.rule_id);
7816 			else
7817 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
7818 					status);
7819 		} else if (fltr->dest_vsi) {
7820 			/* update advanced switch filter count */
7821 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
7822 				u32 flags = fltr->flags;
7823 
7824 				fltr->dest_vsi->num_chnl_fltr--;
7825 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
7826 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
7827 					pf->num_dmac_chnl_fltrs--;
7828 			}
7829 		}
7830 
7831 		hlist_del(&fltr->tc_flower_node);
7832 		kfree(fltr);
7833 	}
7834 }
7835 
7836 /**
7837  * ice_remove_q_channels - Remove queue channels for the TCs
7838  * @vsi: VSI to be configured
7839  * @rem_fltr: delete advanced switch filter or not
7840  *
7841  * Remove queue channels for the TCs
7842  */
7843 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
7844 {
7845 	struct ice_channel *ch, *ch_tmp;
7846 	struct ice_pf *pf = vsi->back;
7847 	int i;
7848 
7849 	/* remove all tc-flower based filter if they are channel filters only */
7850 	if (rem_fltr)
7851 		ice_rem_all_chnl_fltrs(pf);
7852 
7853 	/* remove ntuple filters since queue configuration is being changed */
7854 	if  (vsi->netdev->features & NETIF_F_NTUPLE) {
7855 		struct ice_hw *hw = &pf->hw;
7856 
7857 		mutex_lock(&hw->fdir_fltr_lock);
7858 		ice_fdir_del_all_fltrs(vsi);
7859 		mutex_unlock(&hw->fdir_fltr_lock);
7860 	}
7861 
7862 	/* perform cleanup for channels if they exist */
7863 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
7864 		struct ice_vsi *ch_vsi;
7865 
7866 		list_del(&ch->list);
7867 		ch_vsi = ch->ch_vsi;
7868 		if (!ch_vsi) {
7869 			kfree(ch);
7870 			continue;
7871 		}
7872 
7873 		/* Reset queue contexts */
7874 		for (i = 0; i < ch->num_rxq; i++) {
7875 			struct ice_tx_ring *tx_ring;
7876 			struct ice_rx_ring *rx_ring;
7877 
7878 			tx_ring = vsi->tx_rings[ch->base_q + i];
7879 			rx_ring = vsi->rx_rings[ch->base_q + i];
7880 			if (tx_ring) {
7881 				tx_ring->ch = NULL;
7882 				if (tx_ring->q_vector)
7883 					tx_ring->q_vector->ch = NULL;
7884 			}
7885 			if (rx_ring) {
7886 				rx_ring->ch = NULL;
7887 				if (rx_ring->q_vector)
7888 					rx_ring->q_vector->ch = NULL;
7889 			}
7890 		}
7891 
7892 		/* Release FD resources for the channel VSI */
7893 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
7894 
7895 		/* clear the VSI from scheduler tree */
7896 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
7897 
7898 		/* Delete VSI from FW */
7899 		ice_vsi_delete(ch->ch_vsi);
7900 
7901 		/* Delete VSI from PF and HW VSI arrays */
7902 		ice_vsi_clear(ch->ch_vsi);
7903 
7904 		/* free the channel */
7905 		kfree(ch);
7906 	}
7907 
7908 	/* clear the channel VSI map which is stored in main VSI */
7909 	ice_for_each_chnl_tc(i)
7910 		vsi->tc_map_vsi[i] = NULL;
7911 
7912 	/* reset main VSI's all TC information */
7913 	vsi->all_enatc = 0;
7914 	vsi->all_numtc = 0;
7915 }
7916 
7917 /**
7918  * ice_rebuild_channels - rebuild channel
7919  * @pf: ptr to PF
7920  *
7921  * Recreate channel VSIs and replay filters
7922  */
7923 static int ice_rebuild_channels(struct ice_pf *pf)
7924 {
7925 	struct device *dev = ice_pf_to_dev(pf);
7926 	struct ice_vsi *main_vsi;
7927 	bool rem_adv_fltr = true;
7928 	struct ice_channel *ch;
7929 	struct ice_vsi *vsi;
7930 	int tc_idx = 1;
7931 	int i, err;
7932 
7933 	main_vsi = ice_get_main_vsi(pf);
7934 	if (!main_vsi)
7935 		return 0;
7936 
7937 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
7938 	    main_vsi->old_numtc == 1)
7939 		return 0; /* nothing to be done */
7940 
7941 	/* reconfigure main VSI based on old value of TC and cached values
7942 	 * for MQPRIO opts
7943 	 */
7944 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
7945 	if (err) {
7946 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
7947 			main_vsi->old_ena_tc, main_vsi->vsi_num);
7948 		return err;
7949 	}
7950 
7951 	/* rebuild ADQ VSIs */
7952 	ice_for_each_vsi(pf, i) {
7953 		enum ice_vsi_type type;
7954 
7955 		vsi = pf->vsi[i];
7956 		if (!vsi || vsi->type != ICE_VSI_CHNL)
7957 			continue;
7958 
7959 		type = vsi->type;
7960 
7961 		/* rebuild ADQ VSI */
7962 		err = ice_vsi_rebuild(vsi, true);
7963 		if (err) {
7964 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
7965 				ice_vsi_type_str(type), vsi->idx, err);
7966 			goto cleanup;
7967 		}
7968 
7969 		/* Re-map HW VSI number, using VSI handle that has been
7970 		 * previously validated in ice_replay_vsi() call above
7971 		 */
7972 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7973 
7974 		/* replay filters for the VSI */
7975 		err = ice_replay_vsi(&pf->hw, vsi->idx);
7976 		if (err) {
7977 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
7978 				ice_vsi_type_str(type), err, vsi->idx);
7979 			rem_adv_fltr = false;
7980 			goto cleanup;
7981 		}
7982 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
7983 			 ice_vsi_type_str(type), vsi->idx);
7984 
7985 		/* store ADQ VSI at correct TC index in main VSI's
7986 		 * map of TC to VSI
7987 		 */
7988 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
7989 	}
7990 
7991 	/* ADQ VSI(s) has been rebuilt successfully, so setup
7992 	 * channel for main VSI's Tx and Rx rings
7993 	 */
7994 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
7995 		struct ice_vsi *ch_vsi;
7996 
7997 		ch_vsi = ch->ch_vsi;
7998 		if (!ch_vsi)
7999 			continue;
8000 
8001 		/* reconfig channel resources */
8002 		ice_cfg_chnl_all_res(main_vsi, ch);
8003 
8004 		/* replay BW rate limit if it is non-zero */
8005 		if (!ch->max_tx_rate && !ch->min_tx_rate)
8006 			continue;
8007 
8008 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8009 				       ch->min_tx_rate);
8010 		if (err)
8011 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8012 				err, ch->max_tx_rate, ch->min_tx_rate,
8013 				ch_vsi->vsi_num);
8014 		else
8015 			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8016 				ch->max_tx_rate, ch->min_tx_rate,
8017 				ch_vsi->vsi_num);
8018 	}
8019 
8020 	/* reconfig RSS for main VSI */
8021 	if (main_vsi->ch_rss_size)
8022 		ice_vsi_cfg_rss_lut_key(main_vsi);
8023 
8024 	return 0;
8025 
8026 cleanup:
8027 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
8028 	return err;
8029 }
8030 
8031 /**
8032  * ice_create_q_channels - Add queue channel for the given TCs
8033  * @vsi: VSI to be configured
8034  *
8035  * Configures queue channel mapping to the given TCs
8036  */
8037 static int ice_create_q_channels(struct ice_vsi *vsi)
8038 {
8039 	struct ice_pf *pf = vsi->back;
8040 	struct ice_channel *ch;
8041 	int ret = 0, i;
8042 
8043 	ice_for_each_chnl_tc(i) {
8044 		if (!(vsi->all_enatc & BIT(i)))
8045 			continue;
8046 
8047 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8048 		if (!ch) {
8049 			ret = -ENOMEM;
8050 			goto err_free;
8051 		}
8052 		INIT_LIST_HEAD(&ch->list);
8053 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8054 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8055 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8056 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8057 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8058 
8059 		/* convert to Kbits/s */
8060 		if (ch->max_tx_rate)
8061 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
8062 						  ICE_BW_KBPS_DIVISOR);
8063 		if (ch->min_tx_rate)
8064 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
8065 						  ICE_BW_KBPS_DIVISOR);
8066 
8067 		ret = ice_create_q_channel(vsi, ch);
8068 		if (ret) {
8069 			dev_err(ice_pf_to_dev(pf),
8070 				"failed creating channel TC:%d\n", i);
8071 			kfree(ch);
8072 			goto err_free;
8073 		}
8074 		list_add_tail(&ch->list, &vsi->ch_list);
8075 		vsi->tc_map_vsi[i] = ch->ch_vsi;
8076 		dev_dbg(ice_pf_to_dev(pf),
8077 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
8078 	}
8079 	return 0;
8080 
8081 err_free:
8082 	ice_remove_q_channels(vsi, false);
8083 
8084 	return ret;
8085 }
8086 
8087 /**
8088  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8089  * @netdev: net device to configure
8090  * @type_data: TC offload data
8091  */
8092 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8093 {
8094 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8095 	struct ice_netdev_priv *np = netdev_priv(netdev);
8096 	struct ice_vsi *vsi = np->vsi;
8097 	struct ice_pf *pf = vsi->back;
8098 	u16 mode, ena_tc_qdisc = 0;
8099 	int cur_txq, cur_rxq;
8100 	u8 hw = 0, num_tcf;
8101 	struct device *dev;
8102 	int ret, i;
8103 
8104 	dev = ice_pf_to_dev(pf);
8105 	num_tcf = mqprio_qopt->qopt.num_tc;
8106 	hw = mqprio_qopt->qopt.hw;
8107 	mode = mqprio_qopt->mode;
8108 	if (!hw) {
8109 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8110 		vsi->ch_rss_size = 0;
8111 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8112 		goto config_tcf;
8113 	}
8114 
8115 	/* Generate queue region map for number of TCF requested */
8116 	for (i = 0; i < num_tcf; i++)
8117 		ena_tc_qdisc |= BIT(i);
8118 
8119 	switch (mode) {
8120 	case TC_MQPRIO_MODE_CHANNEL:
8121 
8122 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8123 		if (ret) {
8124 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8125 				   ret);
8126 			return ret;
8127 		}
8128 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8129 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8130 		/* don't assume state of hw_tc_offload during driver load
8131 		 * and set the flag for TC flower filter if hw_tc_offload
8132 		 * already ON
8133 		 */
8134 		if (vsi->netdev->features & NETIF_F_HW_TC)
8135 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8136 		break;
8137 	default:
8138 		return -EINVAL;
8139 	}
8140 
8141 config_tcf:
8142 
8143 	/* Requesting same TCF configuration as already enabled */
8144 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8145 	    mode != TC_MQPRIO_MODE_CHANNEL)
8146 		return 0;
8147 
8148 	/* Pause VSI queues */
8149 	ice_dis_vsi(vsi, true);
8150 
8151 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8152 		ice_remove_q_channels(vsi, true);
8153 
8154 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8155 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8156 				     num_online_cpus());
8157 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8158 				     num_online_cpus());
8159 	} else {
8160 		/* logic to rebuild VSI, same like ethtool -L */
8161 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8162 
8163 		for (i = 0; i < num_tcf; i++) {
8164 			if (!(ena_tc_qdisc & BIT(i)))
8165 				continue;
8166 
8167 			offset = vsi->mqprio_qopt.qopt.offset[i];
8168 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8169 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8170 		}
8171 		vsi->req_txq = offset + qcount_tx;
8172 		vsi->req_rxq = offset + qcount_rx;
8173 
8174 		/* store away original rss_size info, so that it gets reused
8175 		 * form ice_vsi_rebuild during tc-qdisc delete stage - to
8176 		 * determine, what should be the rss_sizefor main VSI
8177 		 */
8178 		vsi->orig_rss_size = vsi->rss_size;
8179 	}
8180 
8181 	/* save current values of Tx and Rx queues before calling VSI rebuild
8182 	 * for fallback option
8183 	 */
8184 	cur_txq = vsi->num_txq;
8185 	cur_rxq = vsi->num_rxq;
8186 
8187 	/* proceed with rebuild main VSI using correct number of queues */
8188 	ret = ice_vsi_rebuild(vsi, false);
8189 	if (ret) {
8190 		/* fallback to current number of queues */
8191 		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8192 		vsi->req_txq = cur_txq;
8193 		vsi->req_rxq = cur_rxq;
8194 		clear_bit(ICE_RESET_FAILED, pf->state);
8195 		if (ice_vsi_rebuild(vsi, false)) {
8196 			dev_err(dev, "Rebuild of main VSI failed again\n");
8197 			return ret;
8198 		}
8199 	}
8200 
8201 	vsi->all_numtc = num_tcf;
8202 	vsi->all_enatc = ena_tc_qdisc;
8203 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8204 	if (ret) {
8205 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8206 			   vsi->vsi_num);
8207 		goto exit;
8208 	}
8209 
8210 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8211 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8212 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8213 
8214 		/* set TC0 rate limit if specified */
8215 		if (max_tx_rate || min_tx_rate) {
8216 			/* convert to Kbits/s */
8217 			if (max_tx_rate)
8218 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8219 			if (min_tx_rate)
8220 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8221 
8222 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8223 			if (!ret) {
8224 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8225 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8226 			} else {
8227 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8228 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8229 				goto exit;
8230 			}
8231 		}
8232 		ret = ice_create_q_channels(vsi);
8233 		if (ret) {
8234 			netdev_err(netdev, "failed configuring queue channels\n");
8235 			goto exit;
8236 		} else {
8237 			netdev_dbg(netdev, "successfully configured channels\n");
8238 		}
8239 	}
8240 
8241 	if (vsi->ch_rss_size)
8242 		ice_vsi_cfg_rss_lut_key(vsi);
8243 
8244 exit:
8245 	/* if error, reset the all_numtc and all_enatc */
8246 	if (ret) {
8247 		vsi->all_numtc = 0;
8248 		vsi->all_enatc = 0;
8249 	}
8250 	/* resume VSI */
8251 	ice_ena_vsi(vsi, true);
8252 
8253 	return ret;
8254 }
8255 
8256 static LIST_HEAD(ice_block_cb_list);
8257 
8258 static int
8259 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8260 	     void *type_data)
8261 {
8262 	struct ice_netdev_priv *np = netdev_priv(netdev);
8263 	struct ice_pf *pf = np->vsi->back;
8264 	int err;
8265 
8266 	switch (type) {
8267 	case TC_SETUP_BLOCK:
8268 		return flow_block_cb_setup_simple(type_data,
8269 						  &ice_block_cb_list,
8270 						  ice_setup_tc_block_cb,
8271 						  np, np, true);
8272 	case TC_SETUP_QDISC_MQPRIO:
8273 		/* setup traffic classifier for receive side */
8274 		mutex_lock(&pf->tc_mutex);
8275 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8276 		mutex_unlock(&pf->tc_mutex);
8277 		return err;
8278 	default:
8279 		return -EOPNOTSUPP;
8280 	}
8281 	return -EOPNOTSUPP;
8282 }
8283 
8284 static struct ice_indr_block_priv *
8285 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8286 			   struct net_device *netdev)
8287 {
8288 	struct ice_indr_block_priv *cb_priv;
8289 
8290 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8291 		if (!cb_priv->netdev)
8292 			return NULL;
8293 		if (cb_priv->netdev == netdev)
8294 			return cb_priv;
8295 	}
8296 	return NULL;
8297 }
8298 
8299 static int
8300 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8301 			void *indr_priv)
8302 {
8303 	struct ice_indr_block_priv *priv = indr_priv;
8304 	struct ice_netdev_priv *np = priv->np;
8305 
8306 	switch (type) {
8307 	case TC_SETUP_CLSFLOWER:
8308 		return ice_setup_tc_cls_flower(np, priv->netdev,
8309 					       (struct flow_cls_offload *)
8310 					       type_data);
8311 	default:
8312 		return -EOPNOTSUPP;
8313 	}
8314 }
8315 
8316 static int
8317 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8318 			struct ice_netdev_priv *np,
8319 			struct flow_block_offload *f, void *data,
8320 			void (*cleanup)(struct flow_block_cb *block_cb))
8321 {
8322 	struct ice_indr_block_priv *indr_priv;
8323 	struct flow_block_cb *block_cb;
8324 
8325 	if (!ice_is_tunnel_supported(netdev) &&
8326 	    !(is_vlan_dev(netdev) &&
8327 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
8328 		return -EOPNOTSUPP;
8329 
8330 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8331 		return -EOPNOTSUPP;
8332 
8333 	switch (f->command) {
8334 	case FLOW_BLOCK_BIND:
8335 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
8336 		if (indr_priv)
8337 			return -EEXIST;
8338 
8339 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
8340 		if (!indr_priv)
8341 			return -ENOMEM;
8342 
8343 		indr_priv->netdev = netdev;
8344 		indr_priv->np = np;
8345 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
8346 
8347 		block_cb =
8348 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
8349 						 indr_priv, indr_priv,
8350 						 ice_rep_indr_tc_block_unbind,
8351 						 f, netdev, sch, data, np,
8352 						 cleanup);
8353 
8354 		if (IS_ERR(block_cb)) {
8355 			list_del(&indr_priv->list);
8356 			kfree(indr_priv);
8357 			return PTR_ERR(block_cb);
8358 		}
8359 		flow_block_cb_add(block_cb, f);
8360 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
8361 		break;
8362 	case FLOW_BLOCK_UNBIND:
8363 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
8364 		if (!indr_priv)
8365 			return -ENOENT;
8366 
8367 		block_cb = flow_block_cb_lookup(f->block,
8368 						ice_indr_setup_block_cb,
8369 						indr_priv);
8370 		if (!block_cb)
8371 			return -ENOENT;
8372 
8373 		flow_indr_block_cb_remove(block_cb, f);
8374 
8375 		list_del(&block_cb->driver_list);
8376 		break;
8377 	default:
8378 		return -EOPNOTSUPP;
8379 	}
8380 	return 0;
8381 }
8382 
8383 static int
8384 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
8385 		     void *cb_priv, enum tc_setup_type type, void *type_data,
8386 		     void *data,
8387 		     void (*cleanup)(struct flow_block_cb *block_cb))
8388 {
8389 	switch (type) {
8390 	case TC_SETUP_BLOCK:
8391 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
8392 					       data, cleanup);
8393 
8394 	default:
8395 		return -EOPNOTSUPP;
8396 	}
8397 }
8398 
8399 /**
8400  * ice_open - Called when a network interface becomes active
8401  * @netdev: network interface device structure
8402  *
8403  * The open entry point is called when a network interface is made
8404  * active by the system (IFF_UP). At this point all resources needed
8405  * for transmit and receive operations are allocated, the interrupt
8406  * handler is registered with the OS, the netdev watchdog is enabled,
8407  * and the stack is notified that the interface is ready.
8408  *
8409  * Returns 0 on success, negative value on failure
8410  */
8411 int ice_open(struct net_device *netdev)
8412 {
8413 	struct ice_netdev_priv *np = netdev_priv(netdev);
8414 	struct ice_pf *pf = np->vsi->back;
8415 
8416 	if (ice_is_reset_in_progress(pf->state)) {
8417 		netdev_err(netdev, "can't open net device while reset is in progress");
8418 		return -EBUSY;
8419 	}
8420 
8421 	return ice_open_internal(netdev);
8422 }
8423 
8424 /**
8425  * ice_open_internal - Called when a network interface becomes active
8426  * @netdev: network interface device structure
8427  *
8428  * Internal ice_open implementation. Should not be used directly except for ice_open and reset
8429  * handling routine
8430  *
8431  * Returns 0 on success, negative value on failure
8432  */
8433 int ice_open_internal(struct net_device *netdev)
8434 {
8435 	struct ice_netdev_priv *np = netdev_priv(netdev);
8436 	struct ice_vsi *vsi = np->vsi;
8437 	struct ice_pf *pf = vsi->back;
8438 	struct ice_port_info *pi;
8439 	int err;
8440 
8441 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
8442 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
8443 		return -EIO;
8444 	}
8445 
8446 	netif_carrier_off(netdev);
8447 
8448 	pi = vsi->port_info;
8449 	err = ice_update_link_info(pi);
8450 	if (err) {
8451 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
8452 		return err;
8453 	}
8454 
8455 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
8456 
8457 	/* Set PHY if there is media, otherwise, turn off PHY */
8458 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
8459 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8460 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
8461 			err = ice_init_phy_user_cfg(pi);
8462 			if (err) {
8463 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
8464 					   err);
8465 				return err;
8466 			}
8467 		}
8468 
8469 		err = ice_configure_phy(vsi);
8470 		if (err) {
8471 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
8472 				   err);
8473 			return err;
8474 		}
8475 	} else {
8476 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8477 		ice_set_link(vsi, false);
8478 	}
8479 
8480 	err = ice_vsi_open(vsi);
8481 	if (err)
8482 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
8483 			   vsi->vsi_num, vsi->vsw->sw_id);
8484 
8485 	/* Update existing tunnels information */
8486 	udp_tunnel_get_rx_info(netdev);
8487 
8488 	return err;
8489 }
8490 
8491 /**
8492  * ice_stop - Disables a network interface
8493  * @netdev: network interface device structure
8494  *
8495  * The stop entry point is called when an interface is de-activated by the OS,
8496  * and the netdevice enters the DOWN state. The hardware is still under the
8497  * driver's control, but the netdev interface is disabled.
8498  *
8499  * Returns success only - not allowed to fail
8500  */
8501 int ice_stop(struct net_device *netdev)
8502 {
8503 	struct ice_netdev_priv *np = netdev_priv(netdev);
8504 	struct ice_vsi *vsi = np->vsi;
8505 	struct ice_pf *pf = vsi->back;
8506 
8507 	if (ice_is_reset_in_progress(pf->state)) {
8508 		netdev_err(netdev, "can't stop net device while reset is in progress");
8509 		return -EBUSY;
8510 	}
8511 
8512 	ice_vsi_close(vsi);
8513 
8514 	return 0;
8515 }
8516 
8517 /**
8518  * ice_features_check - Validate encapsulated packet conforms to limits
8519  * @skb: skb buffer
8520  * @netdev: This port's netdev
8521  * @features: Offload features that the stack believes apply
8522  */
8523 static netdev_features_t
8524 ice_features_check(struct sk_buff *skb,
8525 		   struct net_device __always_unused *netdev,
8526 		   netdev_features_t features)
8527 {
8528 	size_t len;
8529 
8530 	/* No point in doing any of this if neither checksum nor GSO are
8531 	 * being requested for this frame. We can rule out both by just
8532 	 * checking for CHECKSUM_PARTIAL
8533 	 */
8534 	if (skb->ip_summed != CHECKSUM_PARTIAL)
8535 		return features;
8536 
8537 	/* We cannot support GSO if the MSS is going to be less than
8538 	 * 64 bytes. If it is then we need to drop support for GSO.
8539 	 */
8540 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
8541 		features &= ~NETIF_F_GSO_MASK;
8542 
8543 	len = skb_network_header(skb) - skb->data;
8544 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
8545 		goto out_rm_features;
8546 
8547 	len = skb_transport_header(skb) - skb_network_header(skb);
8548 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8549 		goto out_rm_features;
8550 
8551 	if (skb->encapsulation) {
8552 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
8553 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
8554 			goto out_rm_features;
8555 
8556 		len = skb_inner_transport_header(skb) -
8557 		      skb_inner_network_header(skb);
8558 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8559 			goto out_rm_features;
8560 	}
8561 
8562 	return features;
8563 out_rm_features:
8564 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
8565 }
8566 
8567 static const struct net_device_ops ice_netdev_safe_mode_ops = {
8568 	.ndo_open = ice_open,
8569 	.ndo_stop = ice_stop,
8570 	.ndo_start_xmit = ice_start_xmit,
8571 	.ndo_set_mac_address = ice_set_mac_address,
8572 	.ndo_validate_addr = eth_validate_addr,
8573 	.ndo_change_mtu = ice_change_mtu,
8574 	.ndo_get_stats64 = ice_get_stats64,
8575 	.ndo_tx_timeout = ice_tx_timeout,
8576 	.ndo_bpf = ice_xdp_safe_mode,
8577 };
8578 
8579 static const struct net_device_ops ice_netdev_ops = {
8580 	.ndo_open = ice_open,
8581 	.ndo_stop = ice_stop,
8582 	.ndo_start_xmit = ice_start_xmit,
8583 	.ndo_select_queue = ice_select_queue,
8584 	.ndo_features_check = ice_features_check,
8585 	.ndo_set_rx_mode = ice_set_rx_mode,
8586 	.ndo_set_mac_address = ice_set_mac_address,
8587 	.ndo_validate_addr = eth_validate_addr,
8588 	.ndo_change_mtu = ice_change_mtu,
8589 	.ndo_get_stats64 = ice_get_stats64,
8590 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
8591 	.ndo_eth_ioctl = ice_eth_ioctl,
8592 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
8593 	.ndo_set_vf_mac = ice_set_vf_mac,
8594 	.ndo_get_vf_config = ice_get_vf_cfg,
8595 	.ndo_set_vf_trust = ice_set_vf_trust,
8596 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
8597 	.ndo_set_vf_link_state = ice_set_vf_link_state,
8598 	.ndo_get_vf_stats = ice_get_vf_stats,
8599 	.ndo_set_vf_rate = ice_set_vf_bw,
8600 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
8601 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
8602 	.ndo_setup_tc = ice_setup_tc,
8603 	.ndo_set_features = ice_set_features,
8604 	.ndo_bridge_getlink = ice_bridge_getlink,
8605 	.ndo_bridge_setlink = ice_bridge_setlink,
8606 	.ndo_fdb_add = ice_fdb_add,
8607 	.ndo_fdb_del = ice_fdb_del,
8608 #ifdef CONFIG_RFS_ACCEL
8609 	.ndo_rx_flow_steer = ice_rx_flow_steer,
8610 #endif
8611 	.ndo_tx_timeout = ice_tx_timeout,
8612 	.ndo_bpf = ice_xdp,
8613 	.ndo_xdp_xmit = ice_xdp_xmit,
8614 	.ndo_xsk_wakeup = ice_xsk_wakeup,
8615 };
8616