1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
17  * ice tracepoint functions. This must be done exactly once across the
18  * ice driver.
19  */
20 #define CREATE_TRACE_POINTS
21 #include "ice_trace.h"
22 #include "ice_eswitch.h"
23 #include "ice_tc_lib.h"
24 #include "ice_vsi_vlan_ops.h"
25 #include <net/xdp_sock_drv.h>
26 
27 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
28 static const char ice_driver_string[] = DRV_SUMMARY;
29 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
30 
31 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
32 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
33 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
34 
35 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
36 MODULE_DESCRIPTION(DRV_SUMMARY);
37 MODULE_LICENSE("GPL v2");
38 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
39 
40 static int debug = -1;
41 module_param(debug, int, 0644);
42 #ifndef CONFIG_DYNAMIC_DEBUG
43 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
44 #else
45 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
46 #endif /* !CONFIG_DYNAMIC_DEBUG */
47 
48 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
49 EXPORT_SYMBOL(ice_xdp_locking_key);
50 
51 /**
52  * ice_hw_to_dev - Get device pointer from the hardware structure
53  * @hw: pointer to the device HW structure
54  *
55  * Used to access the device pointer from compilation units which can't easily
56  * include the definition of struct ice_pf without leading to circular header
57  * dependencies.
58  */
59 struct device *ice_hw_to_dev(struct ice_hw *hw)
60 {
61 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
62 
63 	return &pf->pdev->dev;
64 }
65 
66 static struct workqueue_struct *ice_wq;
67 static const struct net_device_ops ice_netdev_safe_mode_ops;
68 static const struct net_device_ops ice_netdev_ops;
69 
70 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
71 
72 static void ice_vsi_release_all(struct ice_pf *pf);
73 
74 static int ice_rebuild_channels(struct ice_pf *pf);
75 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
76 
77 static int
78 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
79 		     void *cb_priv, enum tc_setup_type type, void *type_data,
80 		     void *data,
81 		     void (*cleanup)(struct flow_block_cb *block_cb));
82 
83 bool netif_is_ice(struct net_device *dev)
84 {
85 	return dev && (dev->netdev_ops == &ice_netdev_ops);
86 }
87 
88 /**
89  * ice_get_tx_pending - returns number of Tx descriptors not processed
90  * @ring: the ring of descriptors
91  */
92 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
93 {
94 	u16 head, tail;
95 
96 	head = ring->next_to_clean;
97 	tail = ring->next_to_use;
98 
99 	if (head != tail)
100 		return (head < tail) ?
101 			tail - head : (tail + ring->count - head);
102 	return 0;
103 }
104 
105 /**
106  * ice_check_for_hang_subtask - check for and recover hung queues
107  * @pf: pointer to PF struct
108  */
109 static void ice_check_for_hang_subtask(struct ice_pf *pf)
110 {
111 	struct ice_vsi *vsi = NULL;
112 	struct ice_hw *hw;
113 	unsigned int i;
114 	int packets;
115 	u32 v;
116 
117 	ice_for_each_vsi(pf, v)
118 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
119 			vsi = pf->vsi[v];
120 			break;
121 		}
122 
123 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
124 		return;
125 
126 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
127 		return;
128 
129 	hw = &vsi->back->hw;
130 
131 	ice_for_each_txq(vsi, i) {
132 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
133 		struct ice_ring_stats *ring_stats;
134 
135 		if (!tx_ring)
136 			continue;
137 		if (ice_ring_ch_enabled(tx_ring))
138 			continue;
139 
140 		ring_stats = tx_ring->ring_stats;
141 		if (!ring_stats)
142 			continue;
143 
144 		if (tx_ring->desc) {
145 			/* If packet counter has not changed the queue is
146 			 * likely stalled, so force an interrupt for this
147 			 * queue.
148 			 *
149 			 * prev_pkt would be negative if there was no
150 			 * pending work.
151 			 */
152 			packets = ring_stats->stats.pkts & INT_MAX;
153 			if (ring_stats->tx_stats.prev_pkt == packets) {
154 				/* Trigger sw interrupt to revive the queue */
155 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
156 				continue;
157 			}
158 
159 			/* Memory barrier between read of packet count and call
160 			 * to ice_get_tx_pending()
161 			 */
162 			smp_rmb();
163 			ring_stats->tx_stats.prev_pkt =
164 			    ice_get_tx_pending(tx_ring) ? packets : -1;
165 		}
166 	}
167 }
168 
169 /**
170  * ice_init_mac_fltr - Set initial MAC filters
171  * @pf: board private structure
172  *
173  * Set initial set of MAC filters for PF VSI; configure filters for permanent
174  * address and broadcast address. If an error is encountered, netdevice will be
175  * unregistered.
176  */
177 static int ice_init_mac_fltr(struct ice_pf *pf)
178 {
179 	struct ice_vsi *vsi;
180 	u8 *perm_addr;
181 
182 	vsi = ice_get_main_vsi(pf);
183 	if (!vsi)
184 		return -EINVAL;
185 
186 	perm_addr = vsi->port_info->mac.perm_addr;
187 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
188 }
189 
190 /**
191  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
192  * @netdev: the net device on which the sync is happening
193  * @addr: MAC address to sync
194  *
195  * This is a callback function which is called by the in kernel device sync
196  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
197  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
198  * MAC filters from the hardware.
199  */
200 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
201 {
202 	struct ice_netdev_priv *np = netdev_priv(netdev);
203 	struct ice_vsi *vsi = np->vsi;
204 
205 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
206 				     ICE_FWD_TO_VSI))
207 		return -EINVAL;
208 
209 	return 0;
210 }
211 
212 /**
213  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
214  * @netdev: the net device on which the unsync is happening
215  * @addr: MAC address to unsync
216  *
217  * This is a callback function which is called by the in kernel device unsync
218  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
219  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
220  * delete the MAC filters from the hardware.
221  */
222 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
223 {
224 	struct ice_netdev_priv *np = netdev_priv(netdev);
225 	struct ice_vsi *vsi = np->vsi;
226 
227 	/* Under some circumstances, we might receive a request to delete our
228 	 * own device address from our uc list. Because we store the device
229 	 * address in the VSI's MAC filter list, we need to ignore such
230 	 * requests and not delete our device address from this list.
231 	 */
232 	if (ether_addr_equal(addr, netdev->dev_addr))
233 		return 0;
234 
235 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
236 				     ICE_FWD_TO_VSI))
237 		return -EINVAL;
238 
239 	return 0;
240 }
241 
242 /**
243  * ice_vsi_fltr_changed - check if filter state changed
244  * @vsi: VSI to be checked
245  *
246  * returns true if filter state has changed, false otherwise.
247  */
248 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
249 {
250 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
251 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
252 }
253 
254 /**
255  * ice_set_promisc - Enable promiscuous mode for a given PF
256  * @vsi: the VSI being configured
257  * @promisc_m: mask of promiscuous config bits
258  *
259  */
260 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
261 {
262 	int status;
263 
264 	if (vsi->type != ICE_VSI_PF)
265 		return 0;
266 
267 	if (ice_vsi_has_non_zero_vlans(vsi)) {
268 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
269 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
270 						       promisc_m);
271 	} else {
272 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
273 						  promisc_m, 0);
274 	}
275 	if (status && status != -EEXIST)
276 		return status;
277 
278 	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
279 		   vsi->vsi_num, promisc_m);
280 	return 0;
281 }
282 
283 /**
284  * ice_clear_promisc - Disable promiscuous mode for a given PF
285  * @vsi: the VSI being configured
286  * @promisc_m: mask of promiscuous config bits
287  *
288  */
289 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
290 {
291 	int status;
292 
293 	if (vsi->type != ICE_VSI_PF)
294 		return 0;
295 
296 	if (ice_vsi_has_non_zero_vlans(vsi)) {
297 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
298 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
299 							 promisc_m);
300 	} else {
301 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
302 						    promisc_m, 0);
303 	}
304 
305 	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
306 		   vsi->vsi_num, promisc_m);
307 	return status;
308 }
309 
310 /**
311  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
312  * @vsi: ptr to the VSI
313  *
314  * Push any outstanding VSI filter changes through the AdminQ.
315  */
316 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
317 {
318 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
319 	struct device *dev = ice_pf_to_dev(vsi->back);
320 	struct net_device *netdev = vsi->netdev;
321 	bool promisc_forced_on = false;
322 	struct ice_pf *pf = vsi->back;
323 	struct ice_hw *hw = &pf->hw;
324 	u32 changed_flags = 0;
325 	int err;
326 
327 	if (!vsi->netdev)
328 		return -EINVAL;
329 
330 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
331 		usleep_range(1000, 2000);
332 
333 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
334 	vsi->current_netdev_flags = vsi->netdev->flags;
335 
336 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
337 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
338 
339 	if (ice_vsi_fltr_changed(vsi)) {
340 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
341 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
342 
343 		/* grab the netdev's addr_list_lock */
344 		netif_addr_lock_bh(netdev);
345 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
346 			      ice_add_mac_to_unsync_list);
347 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
348 			      ice_add_mac_to_unsync_list);
349 		/* our temp lists are populated. release lock */
350 		netif_addr_unlock_bh(netdev);
351 	}
352 
353 	/* Remove MAC addresses in the unsync list */
354 	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
355 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
356 	if (err) {
357 		netdev_err(netdev, "Failed to delete MAC filters\n");
358 		/* if we failed because of alloc failures, just bail */
359 		if (err == -ENOMEM)
360 			goto out;
361 	}
362 
363 	/* Add MAC addresses in the sync list */
364 	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
365 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
366 	/* If filter is added successfully or already exists, do not go into
367 	 * 'if' condition and report it as error. Instead continue processing
368 	 * rest of the function.
369 	 */
370 	if (err && err != -EEXIST) {
371 		netdev_err(netdev, "Failed to add MAC filters\n");
372 		/* If there is no more space for new umac filters, VSI
373 		 * should go into promiscuous mode. There should be some
374 		 * space reserved for promiscuous filters.
375 		 */
376 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
377 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
378 				      vsi->state)) {
379 			promisc_forced_on = true;
380 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
381 				    vsi->vsi_num);
382 		} else {
383 			goto out;
384 		}
385 	}
386 	err = 0;
387 	/* check for changes in promiscuous modes */
388 	if (changed_flags & IFF_ALLMULTI) {
389 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
390 			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
391 			if (err) {
392 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
393 				goto out_promisc;
394 			}
395 		} else {
396 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
397 			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
398 			if (err) {
399 				vsi->current_netdev_flags |= IFF_ALLMULTI;
400 				goto out_promisc;
401 			}
402 		}
403 	}
404 
405 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
406 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
407 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
408 		if (vsi->current_netdev_flags & IFF_PROMISC) {
409 			/* Apply Rx filter rule to get traffic from wire */
410 			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
411 				err = ice_set_dflt_vsi(vsi);
412 				if (err && err != -EEXIST) {
413 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
414 						   err, vsi->vsi_num);
415 					vsi->current_netdev_flags &=
416 						~IFF_PROMISC;
417 					goto out_promisc;
418 				}
419 				err = 0;
420 				vlan_ops->dis_rx_filtering(vsi);
421 
422 				/* promiscuous mode implies allmulticast so
423 				 * that VSIs that are in promiscuous mode are
424 				 * subscribed to multicast packets coming to
425 				 * the port
426 				 */
427 				err = ice_set_promisc(vsi,
428 						      ICE_MCAST_PROMISC_BITS);
429 				if (err)
430 					goto out_promisc;
431 			}
432 		} else {
433 			/* Clear Rx filter to remove traffic from wire */
434 			if (ice_is_vsi_dflt_vsi(vsi)) {
435 				err = ice_clear_dflt_vsi(vsi);
436 				if (err) {
437 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
438 						   err, vsi->vsi_num);
439 					vsi->current_netdev_flags |=
440 						IFF_PROMISC;
441 					goto out_promisc;
442 				}
443 				if (vsi->netdev->features &
444 				    NETIF_F_HW_VLAN_CTAG_FILTER)
445 					vlan_ops->ena_rx_filtering(vsi);
446 			}
447 
448 			/* disable allmulti here, but only if allmulti is not
449 			 * still enabled for the netdev
450 			 */
451 			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
452 				err = ice_clear_promisc(vsi,
453 							ICE_MCAST_PROMISC_BITS);
454 				if (err) {
455 					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
456 						   err, vsi->vsi_num);
457 				}
458 			}
459 		}
460 	}
461 	goto exit;
462 
463 out_promisc:
464 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
465 	goto exit;
466 out:
467 	/* if something went wrong then set the changed flag so we try again */
468 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
469 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
470 exit:
471 	clear_bit(ICE_CFG_BUSY, vsi->state);
472 	return err;
473 }
474 
475 /**
476  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
477  * @pf: board private structure
478  */
479 static void ice_sync_fltr_subtask(struct ice_pf *pf)
480 {
481 	int v;
482 
483 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
484 		return;
485 
486 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
487 
488 	ice_for_each_vsi(pf, v)
489 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
490 		    ice_vsi_sync_fltr(pf->vsi[v])) {
491 			/* come back and try again later */
492 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
493 			break;
494 		}
495 }
496 
497 /**
498  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
499  * @pf: the PF
500  * @locked: is the rtnl_lock already held
501  */
502 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
503 {
504 	int node;
505 	int v;
506 
507 	ice_for_each_vsi(pf, v)
508 		if (pf->vsi[v])
509 			ice_dis_vsi(pf->vsi[v], locked);
510 
511 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
512 		pf->pf_agg_node[node].num_vsis = 0;
513 
514 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
515 		pf->vf_agg_node[node].num_vsis = 0;
516 }
517 
518 /**
519  * ice_clear_sw_switch_recipes - clear switch recipes
520  * @pf: board private structure
521  *
522  * Mark switch recipes as not created in sw structures. There are cases where
523  * rules (especially advanced rules) need to be restored, either re-read from
524  * hardware or added again. For example after the reset. 'recp_created' flag
525  * prevents from doing that and need to be cleared upfront.
526  */
527 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
528 {
529 	struct ice_sw_recipe *recp;
530 	u8 i;
531 
532 	recp = pf->hw.switch_info->recp_list;
533 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
534 		recp[i].recp_created = false;
535 }
536 
537 /**
538  * ice_prepare_for_reset - prep for reset
539  * @pf: board private structure
540  * @reset_type: reset type requested
541  *
542  * Inform or close all dependent features in prep for reset.
543  */
544 static void
545 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
546 {
547 	struct ice_hw *hw = &pf->hw;
548 	struct ice_vsi *vsi;
549 	struct ice_vf *vf;
550 	unsigned int bkt;
551 
552 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
553 
554 	/* already prepared for reset */
555 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
556 		return;
557 
558 	ice_unplug_aux_dev(pf);
559 
560 	/* Notify VFs of impending reset */
561 	if (ice_check_sq_alive(hw, &hw->mailboxq))
562 		ice_vc_notify_reset(pf);
563 
564 	/* Disable VFs until reset is completed */
565 	mutex_lock(&pf->vfs.table_lock);
566 	ice_for_each_vf(pf, bkt, vf)
567 		ice_set_vf_state_dis(vf);
568 	mutex_unlock(&pf->vfs.table_lock);
569 
570 	if (ice_is_eswitch_mode_switchdev(pf)) {
571 		if (reset_type != ICE_RESET_PFR)
572 			ice_clear_sw_switch_recipes(pf);
573 	}
574 
575 	/* release ADQ specific HW and SW resources */
576 	vsi = ice_get_main_vsi(pf);
577 	if (!vsi)
578 		goto skip;
579 
580 	/* to be on safe side, reset orig_rss_size so that normal flow
581 	 * of deciding rss_size can take precedence
582 	 */
583 	vsi->orig_rss_size = 0;
584 
585 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
586 		if (reset_type == ICE_RESET_PFR) {
587 			vsi->old_ena_tc = vsi->all_enatc;
588 			vsi->old_numtc = vsi->all_numtc;
589 		} else {
590 			ice_remove_q_channels(vsi, true);
591 
592 			/* for other reset type, do not support channel rebuild
593 			 * hence reset needed info
594 			 */
595 			vsi->old_ena_tc = 0;
596 			vsi->all_enatc = 0;
597 			vsi->old_numtc = 0;
598 			vsi->all_numtc = 0;
599 			vsi->req_txq = 0;
600 			vsi->req_rxq = 0;
601 			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
602 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
603 		}
604 	}
605 skip:
606 
607 	/* clear SW filtering DB */
608 	ice_clear_hw_tbls(hw);
609 	/* disable the VSIs and their queues that are not already DOWN */
610 	ice_pf_dis_all_vsi(pf, false);
611 
612 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
613 		ice_ptp_prepare_for_reset(pf);
614 
615 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
616 		ice_gnss_exit(pf);
617 
618 	if (hw->port_info)
619 		ice_sched_clear_port(hw->port_info);
620 
621 	ice_shutdown_all_ctrlq(hw);
622 
623 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
624 }
625 
626 /**
627  * ice_do_reset - Initiate one of many types of resets
628  * @pf: board private structure
629  * @reset_type: reset type requested before this function was called.
630  */
631 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
632 {
633 	struct device *dev = ice_pf_to_dev(pf);
634 	struct ice_hw *hw = &pf->hw;
635 
636 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
637 
638 	ice_prepare_for_reset(pf, reset_type);
639 
640 	/* trigger the reset */
641 	if (ice_reset(hw, reset_type)) {
642 		dev_err(dev, "reset %d failed\n", reset_type);
643 		set_bit(ICE_RESET_FAILED, pf->state);
644 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
645 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
646 		clear_bit(ICE_PFR_REQ, pf->state);
647 		clear_bit(ICE_CORER_REQ, pf->state);
648 		clear_bit(ICE_GLOBR_REQ, pf->state);
649 		wake_up(&pf->reset_wait_queue);
650 		return;
651 	}
652 
653 	/* PFR is a bit of a special case because it doesn't result in an OICR
654 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
655 	 * associated state bits.
656 	 */
657 	if (reset_type == ICE_RESET_PFR) {
658 		pf->pfr_count++;
659 		ice_rebuild(pf, reset_type);
660 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
661 		clear_bit(ICE_PFR_REQ, pf->state);
662 		wake_up(&pf->reset_wait_queue);
663 		ice_reset_all_vfs(pf);
664 	}
665 }
666 
667 /**
668  * ice_reset_subtask - Set up for resetting the device and driver
669  * @pf: board private structure
670  */
671 static void ice_reset_subtask(struct ice_pf *pf)
672 {
673 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
674 
675 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
676 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
677 	 * of reset is pending and sets bits in pf->state indicating the reset
678 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
679 	 * prepare for pending reset if not already (for PF software-initiated
680 	 * global resets the software should already be prepared for it as
681 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
682 	 * by firmware or software on other PFs, that bit is not set so prepare
683 	 * for the reset now), poll for reset done, rebuild and return.
684 	 */
685 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
686 		/* Perform the largest reset requested */
687 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
688 			reset_type = ICE_RESET_CORER;
689 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
690 			reset_type = ICE_RESET_GLOBR;
691 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
692 			reset_type = ICE_RESET_EMPR;
693 		/* return if no valid reset type requested */
694 		if (reset_type == ICE_RESET_INVAL)
695 			return;
696 		ice_prepare_for_reset(pf, reset_type);
697 
698 		/* make sure we are ready to rebuild */
699 		if (ice_check_reset(&pf->hw)) {
700 			set_bit(ICE_RESET_FAILED, pf->state);
701 		} else {
702 			/* done with reset. start rebuild */
703 			pf->hw.reset_ongoing = false;
704 			ice_rebuild(pf, reset_type);
705 			/* clear bit to resume normal operations, but
706 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
707 			 */
708 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
709 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
710 			clear_bit(ICE_PFR_REQ, pf->state);
711 			clear_bit(ICE_CORER_REQ, pf->state);
712 			clear_bit(ICE_GLOBR_REQ, pf->state);
713 			wake_up(&pf->reset_wait_queue);
714 			ice_reset_all_vfs(pf);
715 		}
716 
717 		return;
718 	}
719 
720 	/* No pending resets to finish processing. Check for new resets */
721 	if (test_bit(ICE_PFR_REQ, pf->state))
722 		reset_type = ICE_RESET_PFR;
723 	if (test_bit(ICE_CORER_REQ, pf->state))
724 		reset_type = ICE_RESET_CORER;
725 	if (test_bit(ICE_GLOBR_REQ, pf->state))
726 		reset_type = ICE_RESET_GLOBR;
727 	/* If no valid reset type requested just return */
728 	if (reset_type == ICE_RESET_INVAL)
729 		return;
730 
731 	/* reset if not already down or busy */
732 	if (!test_bit(ICE_DOWN, pf->state) &&
733 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
734 		ice_do_reset(pf, reset_type);
735 	}
736 }
737 
738 /**
739  * ice_print_topo_conflict - print topology conflict message
740  * @vsi: the VSI whose topology status is being checked
741  */
742 static void ice_print_topo_conflict(struct ice_vsi *vsi)
743 {
744 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
745 	case ICE_AQ_LINK_TOPO_CONFLICT:
746 	case ICE_AQ_LINK_MEDIA_CONFLICT:
747 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
748 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
749 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
750 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
751 		break;
752 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
753 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
754 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
755 		else
756 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
757 		break;
758 	default:
759 		break;
760 	}
761 }
762 
763 /**
764  * ice_print_link_msg - print link up or down message
765  * @vsi: the VSI whose link status is being queried
766  * @isup: boolean for if the link is now up or down
767  */
768 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
769 {
770 	struct ice_aqc_get_phy_caps_data *caps;
771 	const char *an_advertised;
772 	const char *fec_req;
773 	const char *speed;
774 	const char *fec;
775 	const char *fc;
776 	const char *an;
777 	int status;
778 
779 	if (!vsi)
780 		return;
781 
782 	if (vsi->current_isup == isup)
783 		return;
784 
785 	vsi->current_isup = isup;
786 
787 	if (!isup) {
788 		netdev_info(vsi->netdev, "NIC Link is Down\n");
789 		return;
790 	}
791 
792 	switch (vsi->port_info->phy.link_info.link_speed) {
793 	case ICE_AQ_LINK_SPEED_100GB:
794 		speed = "100 G";
795 		break;
796 	case ICE_AQ_LINK_SPEED_50GB:
797 		speed = "50 G";
798 		break;
799 	case ICE_AQ_LINK_SPEED_40GB:
800 		speed = "40 G";
801 		break;
802 	case ICE_AQ_LINK_SPEED_25GB:
803 		speed = "25 G";
804 		break;
805 	case ICE_AQ_LINK_SPEED_20GB:
806 		speed = "20 G";
807 		break;
808 	case ICE_AQ_LINK_SPEED_10GB:
809 		speed = "10 G";
810 		break;
811 	case ICE_AQ_LINK_SPEED_5GB:
812 		speed = "5 G";
813 		break;
814 	case ICE_AQ_LINK_SPEED_2500MB:
815 		speed = "2.5 G";
816 		break;
817 	case ICE_AQ_LINK_SPEED_1000MB:
818 		speed = "1 G";
819 		break;
820 	case ICE_AQ_LINK_SPEED_100MB:
821 		speed = "100 M";
822 		break;
823 	default:
824 		speed = "Unknown ";
825 		break;
826 	}
827 
828 	switch (vsi->port_info->fc.current_mode) {
829 	case ICE_FC_FULL:
830 		fc = "Rx/Tx";
831 		break;
832 	case ICE_FC_TX_PAUSE:
833 		fc = "Tx";
834 		break;
835 	case ICE_FC_RX_PAUSE:
836 		fc = "Rx";
837 		break;
838 	case ICE_FC_NONE:
839 		fc = "None";
840 		break;
841 	default:
842 		fc = "Unknown";
843 		break;
844 	}
845 
846 	/* Get FEC mode based on negotiated link info */
847 	switch (vsi->port_info->phy.link_info.fec_info) {
848 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
849 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
850 		fec = "RS-FEC";
851 		break;
852 	case ICE_AQ_LINK_25G_KR_FEC_EN:
853 		fec = "FC-FEC/BASE-R";
854 		break;
855 	default:
856 		fec = "NONE";
857 		break;
858 	}
859 
860 	/* check if autoneg completed, might be false due to not supported */
861 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
862 		an = "True";
863 	else
864 		an = "False";
865 
866 	/* Get FEC mode requested based on PHY caps last SW configuration */
867 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
868 	if (!caps) {
869 		fec_req = "Unknown";
870 		an_advertised = "Unknown";
871 		goto done;
872 	}
873 
874 	status = ice_aq_get_phy_caps(vsi->port_info, false,
875 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
876 	if (status)
877 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
878 
879 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
880 
881 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
882 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
883 		fec_req = "RS-FEC";
884 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
885 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
886 		fec_req = "FC-FEC/BASE-R";
887 	else
888 		fec_req = "NONE";
889 
890 	kfree(caps);
891 
892 done:
893 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
894 		    speed, fec_req, fec, an_advertised, an, fc);
895 	ice_print_topo_conflict(vsi);
896 }
897 
898 /**
899  * ice_vsi_link_event - update the VSI's netdev
900  * @vsi: the VSI on which the link event occurred
901  * @link_up: whether or not the VSI needs to be set up or down
902  */
903 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
904 {
905 	if (!vsi)
906 		return;
907 
908 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
909 		return;
910 
911 	if (vsi->type == ICE_VSI_PF) {
912 		if (link_up == netif_carrier_ok(vsi->netdev))
913 			return;
914 
915 		if (link_up) {
916 			netif_carrier_on(vsi->netdev);
917 			netif_tx_wake_all_queues(vsi->netdev);
918 		} else {
919 			netif_carrier_off(vsi->netdev);
920 			netif_tx_stop_all_queues(vsi->netdev);
921 		}
922 	}
923 }
924 
925 /**
926  * ice_set_dflt_mib - send a default config MIB to the FW
927  * @pf: private PF struct
928  *
929  * This function sends a default configuration MIB to the FW.
930  *
931  * If this function errors out at any point, the driver is still able to
932  * function.  The main impact is that LFC may not operate as expected.
933  * Therefore an error state in this function should be treated with a DBG
934  * message and continue on with driver rebuild/reenable.
935  */
936 static void ice_set_dflt_mib(struct ice_pf *pf)
937 {
938 	struct device *dev = ice_pf_to_dev(pf);
939 	u8 mib_type, *buf, *lldpmib = NULL;
940 	u16 len, typelen, offset = 0;
941 	struct ice_lldp_org_tlv *tlv;
942 	struct ice_hw *hw = &pf->hw;
943 	u32 ouisubtype;
944 
945 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
946 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
947 	if (!lldpmib) {
948 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
949 			__func__);
950 		return;
951 	}
952 
953 	/* Add ETS CFG TLV */
954 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
955 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
956 		   ICE_IEEE_ETS_TLV_LEN);
957 	tlv->typelen = htons(typelen);
958 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
959 		      ICE_IEEE_SUBTYPE_ETS_CFG);
960 	tlv->ouisubtype = htonl(ouisubtype);
961 
962 	buf = tlv->tlvinfo;
963 	buf[0] = 0;
964 
965 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
966 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
967 	 * Octets 13 - 20 are TSA values - leave as zeros
968 	 */
969 	buf[5] = 0x64;
970 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
971 	offset += len + 2;
972 	tlv = (struct ice_lldp_org_tlv *)
973 		((char *)tlv + sizeof(tlv->typelen) + len);
974 
975 	/* Add ETS REC TLV */
976 	buf = tlv->tlvinfo;
977 	tlv->typelen = htons(typelen);
978 
979 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
980 		      ICE_IEEE_SUBTYPE_ETS_REC);
981 	tlv->ouisubtype = htonl(ouisubtype);
982 
983 	/* First octet of buf is reserved
984 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
985 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
986 	 * Octets 13 - 20 are TSA value - leave as zeros
987 	 */
988 	buf[5] = 0x64;
989 	offset += len + 2;
990 	tlv = (struct ice_lldp_org_tlv *)
991 		((char *)tlv + sizeof(tlv->typelen) + len);
992 
993 	/* Add PFC CFG TLV */
994 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
995 		   ICE_IEEE_PFC_TLV_LEN);
996 	tlv->typelen = htons(typelen);
997 
998 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
999 		      ICE_IEEE_SUBTYPE_PFC_CFG);
1000 	tlv->ouisubtype = htonl(ouisubtype);
1001 
1002 	/* Octet 1 left as all zeros - PFC disabled */
1003 	buf[0] = 0x08;
1004 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
1005 	offset += len + 2;
1006 
1007 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1008 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1009 
1010 	kfree(lldpmib);
1011 }
1012 
1013 /**
1014  * ice_check_phy_fw_load - check if PHY FW load failed
1015  * @pf: pointer to PF struct
1016  * @link_cfg_err: bitmap from the link info structure
1017  *
1018  * check if external PHY FW load failed and print an error message if it did
1019  */
1020 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1021 {
1022 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1023 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1024 		return;
1025 	}
1026 
1027 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1028 		return;
1029 
1030 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1031 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1032 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1033 	}
1034 }
1035 
1036 /**
1037  * ice_check_module_power
1038  * @pf: pointer to PF struct
1039  * @link_cfg_err: bitmap from the link info structure
1040  *
1041  * check module power level returned by a previous call to aq_get_link_info
1042  * and print error messages if module power level is not supported
1043  */
1044 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1045 {
1046 	/* if module power level is supported, clear the flag */
1047 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1048 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1049 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1050 		return;
1051 	}
1052 
1053 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1054 	 * above block didn't clear this bit, there's nothing to do
1055 	 */
1056 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1057 		return;
1058 
1059 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1060 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1061 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1062 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1063 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1064 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1065 	}
1066 }
1067 
1068 /**
1069  * ice_check_link_cfg_err - check if link configuration failed
1070  * @pf: pointer to the PF struct
1071  * @link_cfg_err: bitmap from the link info structure
1072  *
1073  * print if any link configuration failure happens due to the value in the
1074  * link_cfg_err parameter in the link info structure
1075  */
1076 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1077 {
1078 	ice_check_module_power(pf, link_cfg_err);
1079 	ice_check_phy_fw_load(pf, link_cfg_err);
1080 }
1081 
1082 /**
1083  * ice_link_event - process the link event
1084  * @pf: PF that the link event is associated with
1085  * @pi: port_info for the port that the link event is associated with
1086  * @link_up: true if the physical link is up and false if it is down
1087  * @link_speed: current link speed received from the link event
1088  *
1089  * Returns 0 on success and negative on failure
1090  */
1091 static int
1092 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1093 	       u16 link_speed)
1094 {
1095 	struct device *dev = ice_pf_to_dev(pf);
1096 	struct ice_phy_info *phy_info;
1097 	struct ice_vsi *vsi;
1098 	u16 old_link_speed;
1099 	bool old_link;
1100 	int status;
1101 
1102 	phy_info = &pi->phy;
1103 	phy_info->link_info_old = phy_info->link_info;
1104 
1105 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1106 	old_link_speed = phy_info->link_info_old.link_speed;
1107 
1108 	/* update the link info structures and re-enable link events,
1109 	 * don't bail on failure due to other book keeping needed
1110 	 */
1111 	status = ice_update_link_info(pi);
1112 	if (status)
1113 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1114 			pi->lport, status,
1115 			ice_aq_str(pi->hw->adminq.sq_last_status));
1116 
1117 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1118 
1119 	/* Check if the link state is up after updating link info, and treat
1120 	 * this event as an UP event since the link is actually UP now.
1121 	 */
1122 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1123 		link_up = true;
1124 
1125 	vsi = ice_get_main_vsi(pf);
1126 	if (!vsi || !vsi->port_info)
1127 		return -EINVAL;
1128 
1129 	/* turn off PHY if media was removed */
1130 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1131 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1132 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1133 		ice_set_link(vsi, false);
1134 	}
1135 
1136 	/* if the old link up/down and speed is the same as the new */
1137 	if (link_up == old_link && link_speed == old_link_speed)
1138 		return 0;
1139 
1140 	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1141 
1142 	if (ice_is_dcb_active(pf)) {
1143 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1144 			ice_dcb_rebuild(pf);
1145 	} else {
1146 		if (link_up)
1147 			ice_set_dflt_mib(pf);
1148 	}
1149 	ice_vsi_link_event(vsi, link_up);
1150 	ice_print_link_msg(vsi, link_up);
1151 
1152 	ice_vc_notify_link_state(pf);
1153 
1154 	return 0;
1155 }
1156 
1157 /**
1158  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1159  * @pf: board private structure
1160  */
1161 static void ice_watchdog_subtask(struct ice_pf *pf)
1162 {
1163 	int i;
1164 
1165 	/* if interface is down do nothing */
1166 	if (test_bit(ICE_DOWN, pf->state) ||
1167 	    test_bit(ICE_CFG_BUSY, pf->state))
1168 		return;
1169 
1170 	/* make sure we don't do these things too often */
1171 	if (time_before(jiffies,
1172 			pf->serv_tmr_prev + pf->serv_tmr_period))
1173 		return;
1174 
1175 	pf->serv_tmr_prev = jiffies;
1176 
1177 	/* Update the stats for active netdevs so the network stack
1178 	 * can look at updated numbers whenever it cares to
1179 	 */
1180 	ice_update_pf_stats(pf);
1181 	ice_for_each_vsi(pf, i)
1182 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1183 			ice_update_vsi_stats(pf->vsi[i]);
1184 }
1185 
1186 /**
1187  * ice_init_link_events - enable/initialize link events
1188  * @pi: pointer to the port_info instance
1189  *
1190  * Returns -EIO on failure, 0 on success
1191  */
1192 static int ice_init_link_events(struct ice_port_info *pi)
1193 {
1194 	u16 mask;
1195 
1196 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1197 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1198 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1199 
1200 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1201 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1202 			pi->lport);
1203 		return -EIO;
1204 	}
1205 
1206 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1207 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1208 			pi->lport);
1209 		return -EIO;
1210 	}
1211 
1212 	return 0;
1213 }
1214 
1215 /**
1216  * ice_handle_link_event - handle link event via ARQ
1217  * @pf: PF that the link event is associated with
1218  * @event: event structure containing link status info
1219  */
1220 static int
1221 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1222 {
1223 	struct ice_aqc_get_link_status_data *link_data;
1224 	struct ice_port_info *port_info;
1225 	int status;
1226 
1227 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1228 	port_info = pf->hw.port_info;
1229 	if (!port_info)
1230 		return -EINVAL;
1231 
1232 	status = ice_link_event(pf, port_info,
1233 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1234 				le16_to_cpu(link_data->link_speed));
1235 	if (status)
1236 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1237 			status);
1238 
1239 	return status;
1240 }
1241 
1242 enum ice_aq_task_state {
1243 	ICE_AQ_TASK_WAITING = 0,
1244 	ICE_AQ_TASK_COMPLETE,
1245 	ICE_AQ_TASK_CANCELED,
1246 };
1247 
1248 struct ice_aq_task {
1249 	struct hlist_node entry;
1250 
1251 	u16 opcode;
1252 	struct ice_rq_event_info *event;
1253 	enum ice_aq_task_state state;
1254 };
1255 
1256 /**
1257  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1258  * @pf: pointer to the PF private structure
1259  * @opcode: the opcode to wait for
1260  * @timeout: how long to wait, in jiffies
1261  * @event: storage for the event info
1262  *
1263  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1264  * current thread will be put to sleep until the specified event occurs or
1265  * until the given timeout is reached.
1266  *
1267  * To obtain only the descriptor contents, pass an event without an allocated
1268  * msg_buf. If the complete data buffer is desired, allocate the
1269  * event->msg_buf with enough space ahead of time.
1270  *
1271  * Returns: zero on success, or a negative error code on failure.
1272  */
1273 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1274 			  struct ice_rq_event_info *event)
1275 {
1276 	struct device *dev = ice_pf_to_dev(pf);
1277 	struct ice_aq_task *task;
1278 	unsigned long start;
1279 	long ret;
1280 	int err;
1281 
1282 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1283 	if (!task)
1284 		return -ENOMEM;
1285 
1286 	INIT_HLIST_NODE(&task->entry);
1287 	task->opcode = opcode;
1288 	task->event = event;
1289 	task->state = ICE_AQ_TASK_WAITING;
1290 
1291 	spin_lock_bh(&pf->aq_wait_lock);
1292 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1293 	spin_unlock_bh(&pf->aq_wait_lock);
1294 
1295 	start = jiffies;
1296 
1297 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1298 					       timeout);
1299 	switch (task->state) {
1300 	case ICE_AQ_TASK_WAITING:
1301 		err = ret < 0 ? ret : -ETIMEDOUT;
1302 		break;
1303 	case ICE_AQ_TASK_CANCELED:
1304 		err = ret < 0 ? ret : -ECANCELED;
1305 		break;
1306 	case ICE_AQ_TASK_COMPLETE:
1307 		err = ret < 0 ? ret : 0;
1308 		break;
1309 	default:
1310 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1311 		err = -EINVAL;
1312 		break;
1313 	}
1314 
1315 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1316 		jiffies_to_msecs(jiffies - start),
1317 		jiffies_to_msecs(timeout),
1318 		opcode);
1319 
1320 	spin_lock_bh(&pf->aq_wait_lock);
1321 	hlist_del(&task->entry);
1322 	spin_unlock_bh(&pf->aq_wait_lock);
1323 	kfree(task);
1324 
1325 	return err;
1326 }
1327 
1328 /**
1329  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1330  * @pf: pointer to the PF private structure
1331  * @opcode: the opcode of the event
1332  * @event: the event to check
1333  *
1334  * Loops over the current list of pending threads waiting for an AdminQ event.
1335  * For each matching task, copy the contents of the event into the task
1336  * structure and wake up the thread.
1337  *
1338  * If multiple threads wait for the same opcode, they will all be woken up.
1339  *
1340  * Note that event->msg_buf will only be duplicated if the event has a buffer
1341  * with enough space already allocated. Otherwise, only the descriptor and
1342  * message length will be copied.
1343  *
1344  * Returns: true if an event was found, false otherwise
1345  */
1346 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1347 				struct ice_rq_event_info *event)
1348 {
1349 	struct ice_aq_task *task;
1350 	bool found = false;
1351 
1352 	spin_lock_bh(&pf->aq_wait_lock);
1353 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1354 		if (task->state || task->opcode != opcode)
1355 			continue;
1356 
1357 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1358 		task->event->msg_len = event->msg_len;
1359 
1360 		/* Only copy the data buffer if a destination was set */
1361 		if (task->event->msg_buf &&
1362 		    task->event->buf_len > event->buf_len) {
1363 			memcpy(task->event->msg_buf, event->msg_buf,
1364 			       event->buf_len);
1365 			task->event->buf_len = event->buf_len;
1366 		}
1367 
1368 		task->state = ICE_AQ_TASK_COMPLETE;
1369 		found = true;
1370 	}
1371 	spin_unlock_bh(&pf->aq_wait_lock);
1372 
1373 	if (found)
1374 		wake_up(&pf->aq_wait_queue);
1375 }
1376 
1377 /**
1378  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1379  * @pf: the PF private structure
1380  *
1381  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1382  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1383  */
1384 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1385 {
1386 	struct ice_aq_task *task;
1387 
1388 	spin_lock_bh(&pf->aq_wait_lock);
1389 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1390 		task->state = ICE_AQ_TASK_CANCELED;
1391 	spin_unlock_bh(&pf->aq_wait_lock);
1392 
1393 	wake_up(&pf->aq_wait_queue);
1394 }
1395 
1396 #define ICE_MBX_OVERFLOW_WATERMARK 64
1397 
1398 /**
1399  * __ice_clean_ctrlq - helper function to clean controlq rings
1400  * @pf: ptr to struct ice_pf
1401  * @q_type: specific Control queue type
1402  */
1403 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1404 {
1405 	struct device *dev = ice_pf_to_dev(pf);
1406 	struct ice_rq_event_info event;
1407 	struct ice_hw *hw = &pf->hw;
1408 	struct ice_ctl_q_info *cq;
1409 	u16 pending, i = 0;
1410 	const char *qtype;
1411 	u32 oldval, val;
1412 
1413 	/* Do not clean control queue if/when PF reset fails */
1414 	if (test_bit(ICE_RESET_FAILED, pf->state))
1415 		return 0;
1416 
1417 	switch (q_type) {
1418 	case ICE_CTL_Q_ADMIN:
1419 		cq = &hw->adminq;
1420 		qtype = "Admin";
1421 		break;
1422 	case ICE_CTL_Q_SB:
1423 		cq = &hw->sbq;
1424 		qtype = "Sideband";
1425 		break;
1426 	case ICE_CTL_Q_MAILBOX:
1427 		cq = &hw->mailboxq;
1428 		qtype = "Mailbox";
1429 		/* we are going to try to detect a malicious VF, so set the
1430 		 * state to begin detection
1431 		 */
1432 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1433 		break;
1434 	default:
1435 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1436 		return 0;
1437 	}
1438 
1439 	/* check for error indications - PF_xx_AxQLEN register layout for
1440 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1441 	 */
1442 	val = rd32(hw, cq->rq.len);
1443 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1444 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1445 		oldval = val;
1446 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1447 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1448 				qtype);
1449 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1450 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1451 				qtype);
1452 		}
1453 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1454 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1455 				qtype);
1456 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1457 			 PF_FW_ARQLEN_ARQCRIT_M);
1458 		if (oldval != val)
1459 			wr32(hw, cq->rq.len, val);
1460 	}
1461 
1462 	val = rd32(hw, cq->sq.len);
1463 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1464 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1465 		oldval = val;
1466 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1467 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1468 				qtype);
1469 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1470 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1471 				qtype);
1472 		}
1473 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1474 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1475 				qtype);
1476 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1477 			 PF_FW_ATQLEN_ATQCRIT_M);
1478 		if (oldval != val)
1479 			wr32(hw, cq->sq.len, val);
1480 	}
1481 
1482 	event.buf_len = cq->rq_buf_size;
1483 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1484 	if (!event.msg_buf)
1485 		return 0;
1486 
1487 	do {
1488 		struct ice_mbx_data data = {};
1489 		u16 opcode;
1490 		int ret;
1491 
1492 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1493 		if (ret == -EALREADY)
1494 			break;
1495 		if (ret) {
1496 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1497 				ret);
1498 			break;
1499 		}
1500 
1501 		opcode = le16_to_cpu(event.desc.opcode);
1502 
1503 		/* Notify any thread that might be waiting for this event */
1504 		ice_aq_check_events(pf, opcode, &event);
1505 
1506 		switch (opcode) {
1507 		case ice_aqc_opc_get_link_status:
1508 			if (ice_handle_link_event(pf, &event))
1509 				dev_err(dev, "Could not handle link event\n");
1510 			break;
1511 		case ice_aqc_opc_event_lan_overflow:
1512 			ice_vf_lan_overflow_event(pf, &event);
1513 			break;
1514 		case ice_mbx_opc_send_msg_to_pf:
1515 			data.num_msg_proc = i;
1516 			data.num_pending_arq = pending;
1517 			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
1518 			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
1519 
1520 			ice_vc_process_vf_msg(pf, &event, &data);
1521 			break;
1522 		case ice_aqc_opc_fw_logging:
1523 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1524 			break;
1525 		case ice_aqc_opc_lldp_set_mib_change:
1526 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1527 			break;
1528 		default:
1529 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1530 				qtype, opcode);
1531 			break;
1532 		}
1533 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1534 
1535 	kfree(event.msg_buf);
1536 
1537 	return pending && (i == ICE_DFLT_IRQ_WORK);
1538 }
1539 
1540 /**
1541  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1542  * @hw: pointer to hardware info
1543  * @cq: control queue information
1544  *
1545  * returns true if there are pending messages in a queue, false if there aren't
1546  */
1547 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1548 {
1549 	u16 ntu;
1550 
1551 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1552 	return cq->rq.next_to_clean != ntu;
1553 }
1554 
1555 /**
1556  * ice_clean_adminq_subtask - clean the AdminQ rings
1557  * @pf: board private structure
1558  */
1559 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1560 {
1561 	struct ice_hw *hw = &pf->hw;
1562 
1563 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1564 		return;
1565 
1566 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1567 		return;
1568 
1569 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1570 
1571 	/* There might be a situation where new messages arrive to a control
1572 	 * queue between processing the last message and clearing the
1573 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1574 	 * ice_ctrlq_pending) and process new messages if any.
1575 	 */
1576 	if (ice_ctrlq_pending(hw, &hw->adminq))
1577 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1578 
1579 	ice_flush(hw);
1580 }
1581 
1582 /**
1583  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1584  * @pf: board private structure
1585  */
1586 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1587 {
1588 	struct ice_hw *hw = &pf->hw;
1589 
1590 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1591 		return;
1592 
1593 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1594 		return;
1595 
1596 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1597 
1598 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1599 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1600 
1601 	ice_flush(hw);
1602 }
1603 
1604 /**
1605  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1606  * @pf: board private structure
1607  */
1608 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1609 {
1610 	struct ice_hw *hw = &pf->hw;
1611 
1612 	/* Nothing to do here if sideband queue is not supported */
1613 	if (!ice_is_sbq_supported(hw)) {
1614 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1615 		return;
1616 	}
1617 
1618 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1619 		return;
1620 
1621 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1622 		return;
1623 
1624 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1625 
1626 	if (ice_ctrlq_pending(hw, &hw->sbq))
1627 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1628 
1629 	ice_flush(hw);
1630 }
1631 
1632 /**
1633  * ice_service_task_schedule - schedule the service task to wake up
1634  * @pf: board private structure
1635  *
1636  * If not already scheduled, this puts the task into the work queue.
1637  */
1638 void ice_service_task_schedule(struct ice_pf *pf)
1639 {
1640 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1641 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1642 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1643 		queue_work(ice_wq, &pf->serv_task);
1644 }
1645 
1646 /**
1647  * ice_service_task_complete - finish up the service task
1648  * @pf: board private structure
1649  */
1650 static void ice_service_task_complete(struct ice_pf *pf)
1651 {
1652 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1653 
1654 	/* force memory (pf->state) to sync before next service task */
1655 	smp_mb__before_atomic();
1656 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1657 }
1658 
1659 /**
1660  * ice_service_task_stop - stop service task and cancel works
1661  * @pf: board private structure
1662  *
1663  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1664  * 1 otherwise.
1665  */
1666 static int ice_service_task_stop(struct ice_pf *pf)
1667 {
1668 	int ret;
1669 
1670 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1671 
1672 	if (pf->serv_tmr.function)
1673 		del_timer_sync(&pf->serv_tmr);
1674 	if (pf->serv_task.func)
1675 		cancel_work_sync(&pf->serv_task);
1676 
1677 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1678 	return ret;
1679 }
1680 
1681 /**
1682  * ice_service_task_restart - restart service task and schedule works
1683  * @pf: board private structure
1684  *
1685  * This function is needed for suspend and resume works (e.g WoL scenario)
1686  */
1687 static void ice_service_task_restart(struct ice_pf *pf)
1688 {
1689 	clear_bit(ICE_SERVICE_DIS, pf->state);
1690 	ice_service_task_schedule(pf);
1691 }
1692 
1693 /**
1694  * ice_service_timer - timer callback to schedule service task
1695  * @t: pointer to timer_list
1696  */
1697 static void ice_service_timer(struct timer_list *t)
1698 {
1699 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1700 
1701 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1702 	ice_service_task_schedule(pf);
1703 }
1704 
1705 /**
1706  * ice_handle_mdd_event - handle malicious driver detect event
1707  * @pf: pointer to the PF structure
1708  *
1709  * Called from service task. OICR interrupt handler indicates MDD event.
1710  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1711  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1712  * disable the queue, the PF can be configured to reset the VF using ethtool
1713  * private flag mdd-auto-reset-vf.
1714  */
1715 static void ice_handle_mdd_event(struct ice_pf *pf)
1716 {
1717 	struct device *dev = ice_pf_to_dev(pf);
1718 	struct ice_hw *hw = &pf->hw;
1719 	struct ice_vf *vf;
1720 	unsigned int bkt;
1721 	u32 reg;
1722 
1723 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1724 		/* Since the VF MDD event logging is rate limited, check if
1725 		 * there are pending MDD events.
1726 		 */
1727 		ice_print_vfs_mdd_events(pf);
1728 		return;
1729 	}
1730 
1731 	/* find what triggered an MDD event */
1732 	reg = rd32(hw, GL_MDET_TX_PQM);
1733 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1734 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1735 				GL_MDET_TX_PQM_PF_NUM_S;
1736 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1737 				GL_MDET_TX_PQM_VF_NUM_S;
1738 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1739 				GL_MDET_TX_PQM_MAL_TYPE_S;
1740 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1741 				GL_MDET_TX_PQM_QNUM_S);
1742 
1743 		if (netif_msg_tx_err(pf))
1744 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1745 				 event, queue, pf_num, vf_num);
1746 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1747 	}
1748 
1749 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1750 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1751 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1752 				GL_MDET_TX_TCLAN_PF_NUM_S;
1753 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1754 				GL_MDET_TX_TCLAN_VF_NUM_S;
1755 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1756 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1757 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1758 				GL_MDET_TX_TCLAN_QNUM_S);
1759 
1760 		if (netif_msg_tx_err(pf))
1761 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1762 				 event, queue, pf_num, vf_num);
1763 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1764 	}
1765 
1766 	reg = rd32(hw, GL_MDET_RX);
1767 	if (reg & GL_MDET_RX_VALID_M) {
1768 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1769 				GL_MDET_RX_PF_NUM_S;
1770 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1771 				GL_MDET_RX_VF_NUM_S;
1772 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1773 				GL_MDET_RX_MAL_TYPE_S;
1774 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1775 				GL_MDET_RX_QNUM_S);
1776 
1777 		if (netif_msg_rx_err(pf))
1778 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1779 				 event, queue, pf_num, vf_num);
1780 		wr32(hw, GL_MDET_RX, 0xffffffff);
1781 	}
1782 
1783 	/* check to see if this PF caused an MDD event */
1784 	reg = rd32(hw, PF_MDET_TX_PQM);
1785 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1786 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1787 		if (netif_msg_tx_err(pf))
1788 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1789 	}
1790 
1791 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1792 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1793 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1794 		if (netif_msg_tx_err(pf))
1795 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1796 	}
1797 
1798 	reg = rd32(hw, PF_MDET_RX);
1799 	if (reg & PF_MDET_RX_VALID_M) {
1800 		wr32(hw, PF_MDET_RX, 0xFFFF);
1801 		if (netif_msg_rx_err(pf))
1802 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1803 	}
1804 
1805 	/* Check to see if one of the VFs caused an MDD event, and then
1806 	 * increment counters and set print pending
1807 	 */
1808 	mutex_lock(&pf->vfs.table_lock);
1809 	ice_for_each_vf(pf, bkt, vf) {
1810 		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1811 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1812 			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1813 			vf->mdd_tx_events.count++;
1814 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1815 			if (netif_msg_tx_err(pf))
1816 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1817 					 vf->vf_id);
1818 		}
1819 
1820 		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1821 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1822 			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1823 			vf->mdd_tx_events.count++;
1824 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1825 			if (netif_msg_tx_err(pf))
1826 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1827 					 vf->vf_id);
1828 		}
1829 
1830 		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1831 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1832 			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1833 			vf->mdd_tx_events.count++;
1834 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1835 			if (netif_msg_tx_err(pf))
1836 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1837 					 vf->vf_id);
1838 		}
1839 
1840 		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1841 		if (reg & VP_MDET_RX_VALID_M) {
1842 			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1843 			vf->mdd_rx_events.count++;
1844 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1845 			if (netif_msg_rx_err(pf))
1846 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1847 					 vf->vf_id);
1848 
1849 			/* Since the queue is disabled on VF Rx MDD events, the
1850 			 * PF can be configured to reset the VF through ethtool
1851 			 * private flag mdd-auto-reset-vf.
1852 			 */
1853 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1854 				/* VF MDD event counters will be cleared by
1855 				 * reset, so print the event prior to reset.
1856 				 */
1857 				ice_print_vf_rx_mdd_event(vf);
1858 				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1859 			}
1860 		}
1861 	}
1862 	mutex_unlock(&pf->vfs.table_lock);
1863 
1864 	ice_print_vfs_mdd_events(pf);
1865 }
1866 
1867 /**
1868  * ice_force_phys_link_state - Force the physical link state
1869  * @vsi: VSI to force the physical link state to up/down
1870  * @link_up: true/false indicates to set the physical link to up/down
1871  *
1872  * Force the physical link state by getting the current PHY capabilities from
1873  * hardware and setting the PHY config based on the determined capabilities. If
1874  * link changes a link event will be triggered because both the Enable Automatic
1875  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1876  *
1877  * Returns 0 on success, negative on failure
1878  */
1879 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1880 {
1881 	struct ice_aqc_get_phy_caps_data *pcaps;
1882 	struct ice_aqc_set_phy_cfg_data *cfg;
1883 	struct ice_port_info *pi;
1884 	struct device *dev;
1885 	int retcode;
1886 
1887 	if (!vsi || !vsi->port_info || !vsi->back)
1888 		return -EINVAL;
1889 	if (vsi->type != ICE_VSI_PF)
1890 		return 0;
1891 
1892 	dev = ice_pf_to_dev(vsi->back);
1893 
1894 	pi = vsi->port_info;
1895 
1896 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1897 	if (!pcaps)
1898 		return -ENOMEM;
1899 
1900 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1901 				      NULL);
1902 	if (retcode) {
1903 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1904 			vsi->vsi_num, retcode);
1905 		retcode = -EIO;
1906 		goto out;
1907 	}
1908 
1909 	/* No change in link */
1910 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1911 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1912 		goto out;
1913 
1914 	/* Use the current user PHY configuration. The current user PHY
1915 	 * configuration is initialized during probe from PHY capabilities
1916 	 * software mode, and updated on set PHY configuration.
1917 	 */
1918 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1919 	if (!cfg) {
1920 		retcode = -ENOMEM;
1921 		goto out;
1922 	}
1923 
1924 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1925 	if (link_up)
1926 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1927 	else
1928 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1929 
1930 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1931 	if (retcode) {
1932 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1933 			vsi->vsi_num, retcode);
1934 		retcode = -EIO;
1935 	}
1936 
1937 	kfree(cfg);
1938 out:
1939 	kfree(pcaps);
1940 	return retcode;
1941 }
1942 
1943 /**
1944  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1945  * @pi: port info structure
1946  *
1947  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1948  */
1949 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1950 {
1951 	struct ice_aqc_get_phy_caps_data *pcaps;
1952 	struct ice_pf *pf = pi->hw->back;
1953 	int err;
1954 
1955 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1956 	if (!pcaps)
1957 		return -ENOMEM;
1958 
1959 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1960 				  pcaps, NULL);
1961 
1962 	if (err) {
1963 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1964 		goto out;
1965 	}
1966 
1967 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1968 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1969 
1970 out:
1971 	kfree(pcaps);
1972 	return err;
1973 }
1974 
1975 /**
1976  * ice_init_link_dflt_override - Initialize link default override
1977  * @pi: port info structure
1978  *
1979  * Initialize link default override and PHY total port shutdown during probe
1980  */
1981 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1982 {
1983 	struct ice_link_default_override_tlv *ldo;
1984 	struct ice_pf *pf = pi->hw->back;
1985 
1986 	ldo = &pf->link_dflt_override;
1987 	if (ice_get_link_default_override(ldo, pi))
1988 		return;
1989 
1990 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1991 		return;
1992 
1993 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1994 	 * ethtool private flag) for ports with Port Disable bit set.
1995 	 */
1996 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1997 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1998 }
1999 
2000 /**
2001  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2002  * @pi: port info structure
2003  *
2004  * If default override is enabled, initialize the user PHY cfg speed and FEC
2005  * settings using the default override mask from the NVM.
2006  *
2007  * The PHY should only be configured with the default override settings the
2008  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2009  * is used to indicate that the user PHY cfg default override is initialized
2010  * and the PHY has not been configured with the default override settings. The
2011  * state is set here, and cleared in ice_configure_phy the first time the PHY is
2012  * configured.
2013  *
2014  * This function should be called only if the FW doesn't support default
2015  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2016  */
2017 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2018 {
2019 	struct ice_link_default_override_tlv *ldo;
2020 	struct ice_aqc_set_phy_cfg_data *cfg;
2021 	struct ice_phy_info *phy = &pi->phy;
2022 	struct ice_pf *pf = pi->hw->back;
2023 
2024 	ldo = &pf->link_dflt_override;
2025 
2026 	/* If link default override is enabled, use to mask NVM PHY capabilities
2027 	 * for speed and FEC default configuration.
2028 	 */
2029 	cfg = &phy->curr_user_phy_cfg;
2030 
2031 	if (ldo->phy_type_low || ldo->phy_type_high) {
2032 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2033 				    cpu_to_le64(ldo->phy_type_low);
2034 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2035 				     cpu_to_le64(ldo->phy_type_high);
2036 	}
2037 	cfg->link_fec_opt = ldo->fec_options;
2038 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2039 
2040 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2041 }
2042 
2043 /**
2044  * ice_init_phy_user_cfg - Initialize the PHY user configuration
2045  * @pi: port info structure
2046  *
2047  * Initialize the current user PHY configuration, speed, FEC, and FC requested
2048  * mode to default. The PHY defaults are from get PHY capabilities topology
2049  * with media so call when media is first available. An error is returned if
2050  * called when media is not available. The PHY initialization completed state is
2051  * set here.
2052  *
2053  * These configurations are used when setting PHY
2054  * configuration. The user PHY configuration is updated on set PHY
2055  * configuration. Returns 0 on success, negative on failure
2056  */
2057 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2058 {
2059 	struct ice_aqc_get_phy_caps_data *pcaps;
2060 	struct ice_phy_info *phy = &pi->phy;
2061 	struct ice_pf *pf = pi->hw->back;
2062 	int err;
2063 
2064 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2065 		return -EIO;
2066 
2067 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2068 	if (!pcaps)
2069 		return -ENOMEM;
2070 
2071 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2072 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2073 					  pcaps, NULL);
2074 	else
2075 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2076 					  pcaps, NULL);
2077 	if (err) {
2078 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2079 		goto err_out;
2080 	}
2081 
2082 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2083 
2084 	/* check if lenient mode is supported and enabled */
2085 	if (ice_fw_supports_link_override(pi->hw) &&
2086 	    !(pcaps->module_compliance_enforcement &
2087 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2088 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2089 
2090 		/* if the FW supports default PHY configuration mode, then the driver
2091 		 * does not have to apply link override settings. If not,
2092 		 * initialize user PHY configuration with link override values
2093 		 */
2094 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2095 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2096 			ice_init_phy_cfg_dflt_override(pi);
2097 			goto out;
2098 		}
2099 	}
2100 
2101 	/* if link default override is not enabled, set user flow control and
2102 	 * FEC settings based on what get_phy_caps returned
2103 	 */
2104 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2105 						      pcaps->link_fec_options);
2106 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2107 
2108 out:
2109 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2110 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2111 err_out:
2112 	kfree(pcaps);
2113 	return err;
2114 }
2115 
2116 /**
2117  * ice_configure_phy - configure PHY
2118  * @vsi: VSI of PHY
2119  *
2120  * Set the PHY configuration. If the current PHY configuration is the same as
2121  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2122  * configure the based get PHY capabilities for topology with media.
2123  */
2124 static int ice_configure_phy(struct ice_vsi *vsi)
2125 {
2126 	struct device *dev = ice_pf_to_dev(vsi->back);
2127 	struct ice_port_info *pi = vsi->port_info;
2128 	struct ice_aqc_get_phy_caps_data *pcaps;
2129 	struct ice_aqc_set_phy_cfg_data *cfg;
2130 	struct ice_phy_info *phy = &pi->phy;
2131 	struct ice_pf *pf = vsi->back;
2132 	int err;
2133 
2134 	/* Ensure we have media as we cannot configure a medialess port */
2135 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2136 		return -EPERM;
2137 
2138 	ice_print_topo_conflict(vsi);
2139 
2140 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2141 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2142 		return -EPERM;
2143 
2144 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2145 		return ice_force_phys_link_state(vsi, true);
2146 
2147 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2148 	if (!pcaps)
2149 		return -ENOMEM;
2150 
2151 	/* Get current PHY config */
2152 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2153 				  NULL);
2154 	if (err) {
2155 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2156 			vsi->vsi_num, err);
2157 		goto done;
2158 	}
2159 
2160 	/* If PHY enable link is configured and configuration has not changed,
2161 	 * there's nothing to do
2162 	 */
2163 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2164 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2165 		goto done;
2166 
2167 	/* Use PHY topology as baseline for configuration */
2168 	memset(pcaps, 0, sizeof(*pcaps));
2169 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2170 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2171 					  pcaps, NULL);
2172 	else
2173 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2174 					  pcaps, NULL);
2175 	if (err) {
2176 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2177 			vsi->vsi_num, err);
2178 		goto done;
2179 	}
2180 
2181 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2182 	if (!cfg) {
2183 		err = -ENOMEM;
2184 		goto done;
2185 	}
2186 
2187 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2188 
2189 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2190 	 * ice_init_phy_user_cfg_ldo.
2191 	 */
2192 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2193 			       vsi->back->state)) {
2194 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2195 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2196 	} else {
2197 		u64 phy_low = 0, phy_high = 0;
2198 
2199 		ice_update_phy_type(&phy_low, &phy_high,
2200 				    pi->phy.curr_user_speed_req);
2201 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2202 		cfg->phy_type_high = pcaps->phy_type_high &
2203 				     cpu_to_le64(phy_high);
2204 	}
2205 
2206 	/* Can't provide what was requested; use PHY capabilities */
2207 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2208 		cfg->phy_type_low = pcaps->phy_type_low;
2209 		cfg->phy_type_high = pcaps->phy_type_high;
2210 	}
2211 
2212 	/* FEC */
2213 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2214 
2215 	/* Can't provide what was requested; use PHY capabilities */
2216 	if (cfg->link_fec_opt !=
2217 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2218 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2219 		cfg->link_fec_opt = pcaps->link_fec_options;
2220 	}
2221 
2222 	/* Flow Control - always supported; no need to check against
2223 	 * capabilities
2224 	 */
2225 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2226 
2227 	/* Enable link and link update */
2228 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2229 
2230 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2231 	if (err)
2232 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2233 			vsi->vsi_num, err);
2234 
2235 	kfree(cfg);
2236 done:
2237 	kfree(pcaps);
2238 	return err;
2239 }
2240 
2241 /**
2242  * ice_check_media_subtask - Check for media
2243  * @pf: pointer to PF struct
2244  *
2245  * If media is available, then initialize PHY user configuration if it is not
2246  * been, and configure the PHY if the interface is up.
2247  */
2248 static void ice_check_media_subtask(struct ice_pf *pf)
2249 {
2250 	struct ice_port_info *pi;
2251 	struct ice_vsi *vsi;
2252 	int err;
2253 
2254 	/* No need to check for media if it's already present */
2255 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2256 		return;
2257 
2258 	vsi = ice_get_main_vsi(pf);
2259 	if (!vsi)
2260 		return;
2261 
2262 	/* Refresh link info and check if media is present */
2263 	pi = vsi->port_info;
2264 	err = ice_update_link_info(pi);
2265 	if (err)
2266 		return;
2267 
2268 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2269 
2270 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2271 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2272 			ice_init_phy_user_cfg(pi);
2273 
2274 		/* PHY settings are reset on media insertion, reconfigure
2275 		 * PHY to preserve settings.
2276 		 */
2277 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2278 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2279 			return;
2280 
2281 		err = ice_configure_phy(vsi);
2282 		if (!err)
2283 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2284 
2285 		/* A Link Status Event will be generated; the event handler
2286 		 * will complete bringing the interface up
2287 		 */
2288 	}
2289 }
2290 
2291 /**
2292  * ice_service_task - manage and run subtasks
2293  * @work: pointer to work_struct contained by the PF struct
2294  */
2295 static void ice_service_task(struct work_struct *work)
2296 {
2297 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2298 	unsigned long start_time = jiffies;
2299 
2300 	/* subtasks */
2301 
2302 	/* process reset requests first */
2303 	ice_reset_subtask(pf);
2304 
2305 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2306 	if (ice_is_reset_in_progress(pf->state) ||
2307 	    test_bit(ICE_SUSPENDED, pf->state) ||
2308 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2309 		ice_service_task_complete(pf);
2310 		return;
2311 	}
2312 
2313 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2314 		struct iidc_event *event;
2315 
2316 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2317 		if (event) {
2318 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2319 			/* report the entire OICR value to AUX driver */
2320 			swap(event->reg, pf->oicr_err_reg);
2321 			ice_send_event_to_aux(pf, event);
2322 			kfree(event);
2323 		}
2324 	}
2325 
2326 	/* unplug aux dev per request, if an unplug request came in
2327 	 * while processing a plug request, this will handle it
2328 	 */
2329 	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2330 		ice_unplug_aux_dev(pf);
2331 
2332 	/* Plug aux device per request */
2333 	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2334 		ice_plug_aux_dev(pf);
2335 
2336 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2337 		struct iidc_event *event;
2338 
2339 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2340 		if (event) {
2341 			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2342 			ice_send_event_to_aux(pf, event);
2343 			kfree(event);
2344 		}
2345 	}
2346 
2347 	ice_clean_adminq_subtask(pf);
2348 	ice_check_media_subtask(pf);
2349 	ice_check_for_hang_subtask(pf);
2350 	ice_sync_fltr_subtask(pf);
2351 	ice_handle_mdd_event(pf);
2352 	ice_watchdog_subtask(pf);
2353 
2354 	if (ice_is_safe_mode(pf)) {
2355 		ice_service_task_complete(pf);
2356 		return;
2357 	}
2358 
2359 	ice_process_vflr_event(pf);
2360 	ice_clean_mailboxq_subtask(pf);
2361 	ice_clean_sbq_subtask(pf);
2362 	ice_sync_arfs_fltrs(pf);
2363 	ice_flush_fdir_ctx(pf);
2364 
2365 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2366 	ice_service_task_complete(pf);
2367 
2368 	/* If the tasks have taken longer than one service timer period
2369 	 * or there is more work to be done, reset the service timer to
2370 	 * schedule the service task now.
2371 	 */
2372 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2373 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2374 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2375 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2376 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2377 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2378 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2379 		mod_timer(&pf->serv_tmr, jiffies);
2380 }
2381 
2382 /**
2383  * ice_set_ctrlq_len - helper function to set controlq length
2384  * @hw: pointer to the HW instance
2385  */
2386 static void ice_set_ctrlq_len(struct ice_hw *hw)
2387 {
2388 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2389 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2390 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2391 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2392 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2393 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2394 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2395 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2396 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2397 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2398 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2399 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2400 }
2401 
2402 /**
2403  * ice_schedule_reset - schedule a reset
2404  * @pf: board private structure
2405  * @reset: reset being requested
2406  */
2407 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2408 {
2409 	struct device *dev = ice_pf_to_dev(pf);
2410 
2411 	/* bail out if earlier reset has failed */
2412 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2413 		dev_dbg(dev, "earlier reset has failed\n");
2414 		return -EIO;
2415 	}
2416 	/* bail if reset/recovery already in progress */
2417 	if (ice_is_reset_in_progress(pf->state)) {
2418 		dev_dbg(dev, "Reset already in progress\n");
2419 		return -EBUSY;
2420 	}
2421 
2422 	switch (reset) {
2423 	case ICE_RESET_PFR:
2424 		set_bit(ICE_PFR_REQ, pf->state);
2425 		break;
2426 	case ICE_RESET_CORER:
2427 		set_bit(ICE_CORER_REQ, pf->state);
2428 		break;
2429 	case ICE_RESET_GLOBR:
2430 		set_bit(ICE_GLOBR_REQ, pf->state);
2431 		break;
2432 	default:
2433 		return -EINVAL;
2434 	}
2435 
2436 	ice_service_task_schedule(pf);
2437 	return 0;
2438 }
2439 
2440 /**
2441  * ice_irq_affinity_notify - Callback for affinity changes
2442  * @notify: context as to what irq was changed
2443  * @mask: the new affinity mask
2444  *
2445  * This is a callback function used by the irq_set_affinity_notifier function
2446  * so that we may register to receive changes to the irq affinity masks.
2447  */
2448 static void
2449 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2450 			const cpumask_t *mask)
2451 {
2452 	struct ice_q_vector *q_vector =
2453 		container_of(notify, struct ice_q_vector, affinity_notify);
2454 
2455 	cpumask_copy(&q_vector->affinity_mask, mask);
2456 }
2457 
2458 /**
2459  * ice_irq_affinity_release - Callback for affinity notifier release
2460  * @ref: internal core kernel usage
2461  *
2462  * This is a callback function used by the irq_set_affinity_notifier function
2463  * to inform the current notification subscriber that they will no longer
2464  * receive notifications.
2465  */
2466 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2467 
2468 /**
2469  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2470  * @vsi: the VSI being configured
2471  */
2472 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2473 {
2474 	struct ice_hw *hw = &vsi->back->hw;
2475 	int i;
2476 
2477 	ice_for_each_q_vector(vsi, i)
2478 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2479 
2480 	ice_flush(hw);
2481 	return 0;
2482 }
2483 
2484 /**
2485  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2486  * @vsi: the VSI being configured
2487  * @basename: name for the vector
2488  */
2489 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2490 {
2491 	int q_vectors = vsi->num_q_vectors;
2492 	struct ice_pf *pf = vsi->back;
2493 	struct device *dev;
2494 	int rx_int_idx = 0;
2495 	int tx_int_idx = 0;
2496 	int vector, err;
2497 	int irq_num;
2498 
2499 	dev = ice_pf_to_dev(pf);
2500 	for (vector = 0; vector < q_vectors; vector++) {
2501 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2502 
2503 		irq_num = q_vector->irq.virq;
2504 
2505 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2506 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2507 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2508 			tx_int_idx++;
2509 		} else if (q_vector->rx.rx_ring) {
2510 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2511 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2512 		} else if (q_vector->tx.tx_ring) {
2513 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2514 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2515 		} else {
2516 			/* skip this unused q_vector */
2517 			continue;
2518 		}
2519 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2520 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2521 					       IRQF_SHARED, q_vector->name,
2522 					       q_vector);
2523 		else
2524 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2525 					       0, q_vector->name, q_vector);
2526 		if (err) {
2527 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2528 				   err);
2529 			goto free_q_irqs;
2530 		}
2531 
2532 		/* register for affinity change notifications */
2533 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2534 			struct irq_affinity_notify *affinity_notify;
2535 
2536 			affinity_notify = &q_vector->affinity_notify;
2537 			affinity_notify->notify = ice_irq_affinity_notify;
2538 			affinity_notify->release = ice_irq_affinity_release;
2539 			irq_set_affinity_notifier(irq_num, affinity_notify);
2540 		}
2541 
2542 		/* assign the mask for this irq */
2543 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2544 	}
2545 
2546 	err = ice_set_cpu_rx_rmap(vsi);
2547 	if (err) {
2548 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2549 			   vsi->vsi_num, ERR_PTR(err));
2550 		goto free_q_irqs;
2551 	}
2552 
2553 	vsi->irqs_ready = true;
2554 	return 0;
2555 
2556 free_q_irqs:
2557 	while (vector--) {
2558 		irq_num = vsi->q_vectors[vector]->irq.virq;
2559 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2560 			irq_set_affinity_notifier(irq_num, NULL);
2561 		irq_set_affinity_hint(irq_num, NULL);
2562 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2563 	}
2564 	return err;
2565 }
2566 
2567 /**
2568  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2569  * @vsi: VSI to setup Tx rings used by XDP
2570  *
2571  * Return 0 on success and negative value on error
2572  */
2573 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2574 {
2575 	struct device *dev = ice_pf_to_dev(vsi->back);
2576 	struct ice_tx_desc *tx_desc;
2577 	int i, j;
2578 
2579 	ice_for_each_xdp_txq(vsi, i) {
2580 		u16 xdp_q_idx = vsi->alloc_txq + i;
2581 		struct ice_ring_stats *ring_stats;
2582 		struct ice_tx_ring *xdp_ring;
2583 
2584 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2585 		if (!xdp_ring)
2586 			goto free_xdp_rings;
2587 
2588 		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2589 		if (!ring_stats) {
2590 			ice_free_tx_ring(xdp_ring);
2591 			goto free_xdp_rings;
2592 		}
2593 
2594 		xdp_ring->ring_stats = ring_stats;
2595 		xdp_ring->q_index = xdp_q_idx;
2596 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2597 		xdp_ring->vsi = vsi;
2598 		xdp_ring->netdev = NULL;
2599 		xdp_ring->dev = dev;
2600 		xdp_ring->count = vsi->num_tx_desc;
2601 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2602 		if (ice_setup_tx_ring(xdp_ring))
2603 			goto free_xdp_rings;
2604 		ice_set_ring_xdp(xdp_ring);
2605 		spin_lock_init(&xdp_ring->tx_lock);
2606 		for (j = 0; j < xdp_ring->count; j++) {
2607 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2608 			tx_desc->cmd_type_offset_bsz = 0;
2609 		}
2610 	}
2611 
2612 	return 0;
2613 
2614 free_xdp_rings:
2615 	for (; i >= 0; i--) {
2616 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2617 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2618 			vsi->xdp_rings[i]->ring_stats = NULL;
2619 			ice_free_tx_ring(vsi->xdp_rings[i]);
2620 		}
2621 	}
2622 	return -ENOMEM;
2623 }
2624 
2625 /**
2626  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2627  * @vsi: VSI to set the bpf prog on
2628  * @prog: the bpf prog pointer
2629  */
2630 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2631 {
2632 	struct bpf_prog *old_prog;
2633 	int i;
2634 
2635 	old_prog = xchg(&vsi->xdp_prog, prog);
2636 	if (old_prog)
2637 		bpf_prog_put(old_prog);
2638 
2639 	ice_for_each_rxq(vsi, i)
2640 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2641 }
2642 
2643 /**
2644  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2645  * @vsi: VSI to bring up Tx rings used by XDP
2646  * @prog: bpf program that will be assigned to VSI
2647  *
2648  * Return 0 on success and negative value on error
2649  */
2650 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2651 {
2652 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2653 	int xdp_rings_rem = vsi->num_xdp_txq;
2654 	struct ice_pf *pf = vsi->back;
2655 	struct ice_qs_cfg xdp_qs_cfg = {
2656 		.qs_mutex = &pf->avail_q_mutex,
2657 		.pf_map = pf->avail_txqs,
2658 		.pf_map_size = pf->max_pf_txqs,
2659 		.q_count = vsi->num_xdp_txq,
2660 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2661 		.vsi_map = vsi->txq_map,
2662 		.vsi_map_offset = vsi->alloc_txq,
2663 		.mapping_mode = ICE_VSI_MAP_CONTIG
2664 	};
2665 	struct device *dev;
2666 	int i, v_idx;
2667 	int status;
2668 
2669 	dev = ice_pf_to_dev(pf);
2670 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2671 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2672 	if (!vsi->xdp_rings)
2673 		return -ENOMEM;
2674 
2675 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2676 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2677 		goto err_map_xdp;
2678 
2679 	if (static_key_enabled(&ice_xdp_locking_key))
2680 		netdev_warn(vsi->netdev,
2681 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2682 
2683 	if (ice_xdp_alloc_setup_rings(vsi))
2684 		goto clear_xdp_rings;
2685 
2686 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2687 	ice_for_each_q_vector(vsi, v_idx) {
2688 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2689 		int xdp_rings_per_v, q_id, q_base;
2690 
2691 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2692 					       vsi->num_q_vectors - v_idx);
2693 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2694 
2695 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2696 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2697 
2698 			xdp_ring->q_vector = q_vector;
2699 			xdp_ring->next = q_vector->tx.tx_ring;
2700 			q_vector->tx.tx_ring = xdp_ring;
2701 		}
2702 		xdp_rings_rem -= xdp_rings_per_v;
2703 	}
2704 
2705 	ice_for_each_rxq(vsi, i) {
2706 		if (static_key_enabled(&ice_xdp_locking_key)) {
2707 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2708 		} else {
2709 			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2710 			struct ice_tx_ring *ring;
2711 
2712 			ice_for_each_tx_ring(ring, q_vector->tx) {
2713 				if (ice_ring_is_xdp(ring)) {
2714 					vsi->rx_rings[i]->xdp_ring = ring;
2715 					break;
2716 				}
2717 			}
2718 		}
2719 		ice_tx_xsk_pool(vsi, i);
2720 	}
2721 
2722 	/* omit the scheduler update if in reset path; XDP queues will be
2723 	 * taken into account at the end of ice_vsi_rebuild, where
2724 	 * ice_cfg_vsi_lan is being called
2725 	 */
2726 	if (ice_is_reset_in_progress(pf->state))
2727 		return 0;
2728 
2729 	/* tell the Tx scheduler that right now we have
2730 	 * additional queues
2731 	 */
2732 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2733 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2734 
2735 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2736 				 max_txqs);
2737 	if (status) {
2738 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2739 			status);
2740 		goto clear_xdp_rings;
2741 	}
2742 
2743 	/* assign the prog only when it's not already present on VSI;
2744 	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2745 	 * VSI rebuild that happens under ethtool -L can expose us to
2746 	 * the bpf_prog refcount issues as we would be swapping same
2747 	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2748 	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2749 	 * this is not harmful as dev_xdp_install bumps the refcount
2750 	 * before calling the op exposed by the driver;
2751 	 */
2752 	if (!ice_is_xdp_ena_vsi(vsi))
2753 		ice_vsi_assign_bpf_prog(vsi, prog);
2754 
2755 	return 0;
2756 clear_xdp_rings:
2757 	ice_for_each_xdp_txq(vsi, i)
2758 		if (vsi->xdp_rings[i]) {
2759 			kfree_rcu(vsi->xdp_rings[i], rcu);
2760 			vsi->xdp_rings[i] = NULL;
2761 		}
2762 
2763 err_map_xdp:
2764 	mutex_lock(&pf->avail_q_mutex);
2765 	ice_for_each_xdp_txq(vsi, i) {
2766 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2767 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2768 	}
2769 	mutex_unlock(&pf->avail_q_mutex);
2770 
2771 	devm_kfree(dev, vsi->xdp_rings);
2772 	return -ENOMEM;
2773 }
2774 
2775 /**
2776  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2777  * @vsi: VSI to remove XDP rings
2778  *
2779  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2780  * resources
2781  */
2782 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2783 {
2784 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2785 	struct ice_pf *pf = vsi->back;
2786 	int i, v_idx;
2787 
2788 	/* q_vectors are freed in reset path so there's no point in detaching
2789 	 * rings; in case of rebuild being triggered not from reset bits
2790 	 * in pf->state won't be set, so additionally check first q_vector
2791 	 * against NULL
2792 	 */
2793 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2794 		goto free_qmap;
2795 
2796 	ice_for_each_q_vector(vsi, v_idx) {
2797 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2798 		struct ice_tx_ring *ring;
2799 
2800 		ice_for_each_tx_ring(ring, q_vector->tx)
2801 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2802 				break;
2803 
2804 		/* restore the value of last node prior to XDP setup */
2805 		q_vector->tx.tx_ring = ring;
2806 	}
2807 
2808 free_qmap:
2809 	mutex_lock(&pf->avail_q_mutex);
2810 	ice_for_each_xdp_txq(vsi, i) {
2811 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2812 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2813 	}
2814 	mutex_unlock(&pf->avail_q_mutex);
2815 
2816 	ice_for_each_xdp_txq(vsi, i)
2817 		if (vsi->xdp_rings[i]) {
2818 			if (vsi->xdp_rings[i]->desc) {
2819 				synchronize_rcu();
2820 				ice_free_tx_ring(vsi->xdp_rings[i]);
2821 			}
2822 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2823 			vsi->xdp_rings[i]->ring_stats = NULL;
2824 			kfree_rcu(vsi->xdp_rings[i], rcu);
2825 			vsi->xdp_rings[i] = NULL;
2826 		}
2827 
2828 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2829 	vsi->xdp_rings = NULL;
2830 
2831 	if (static_key_enabled(&ice_xdp_locking_key))
2832 		static_branch_dec(&ice_xdp_locking_key);
2833 
2834 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2835 		return 0;
2836 
2837 	ice_vsi_assign_bpf_prog(vsi, NULL);
2838 
2839 	/* notify Tx scheduler that we destroyed XDP queues and bring
2840 	 * back the old number of child nodes
2841 	 */
2842 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2843 		max_txqs[i] = vsi->num_txq;
2844 
2845 	/* change number of XDP Tx queues to 0 */
2846 	vsi->num_xdp_txq = 0;
2847 
2848 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2849 			       max_txqs);
2850 }
2851 
2852 /**
2853  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2854  * @vsi: VSI to schedule napi on
2855  */
2856 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2857 {
2858 	int i;
2859 
2860 	ice_for_each_rxq(vsi, i) {
2861 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2862 
2863 		if (rx_ring->xsk_pool)
2864 			napi_schedule(&rx_ring->q_vector->napi);
2865 	}
2866 }
2867 
2868 /**
2869  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2870  * @vsi: VSI to determine the count of XDP Tx qs
2871  *
2872  * returns 0 if Tx qs count is higher than at least half of CPU count,
2873  * -ENOMEM otherwise
2874  */
2875 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2876 {
2877 	u16 avail = ice_get_avail_txq_count(vsi->back);
2878 	u16 cpus = num_possible_cpus();
2879 
2880 	if (avail < cpus / 2)
2881 		return -ENOMEM;
2882 
2883 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2884 
2885 	if (vsi->num_xdp_txq < cpus)
2886 		static_branch_inc(&ice_xdp_locking_key);
2887 
2888 	return 0;
2889 }
2890 
2891 /**
2892  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2893  * @vsi: Pointer to VSI structure
2894  */
2895 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2896 {
2897 	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2898 		return ICE_RXBUF_1664;
2899 	else
2900 		return ICE_RXBUF_3072;
2901 }
2902 
2903 /**
2904  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2905  * @vsi: VSI to setup XDP for
2906  * @prog: XDP program
2907  * @extack: netlink extended ack
2908  */
2909 static int
2910 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2911 		   struct netlink_ext_ack *extack)
2912 {
2913 	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2914 	bool if_running = netif_running(vsi->netdev);
2915 	int ret = 0, xdp_ring_err = 0;
2916 
2917 	if (prog && !prog->aux->xdp_has_frags) {
2918 		if (frame_size > ice_max_xdp_frame_size(vsi)) {
2919 			NL_SET_ERR_MSG_MOD(extack,
2920 					   "MTU is too large for linear frames and XDP prog does not support frags");
2921 			return -EOPNOTSUPP;
2922 		}
2923 	}
2924 
2925 	/* need to stop netdev while setting up the program for Rx rings */
2926 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2927 		ret = ice_down(vsi);
2928 		if (ret) {
2929 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2930 			return ret;
2931 		}
2932 	}
2933 
2934 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2935 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2936 		if (xdp_ring_err) {
2937 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2938 		} else {
2939 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2940 			if (xdp_ring_err)
2941 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2942 		}
2943 		xdp_features_set_redirect_target(vsi->netdev, true);
2944 		/* reallocate Rx queues that are used for zero-copy */
2945 		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2946 		if (xdp_ring_err)
2947 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2948 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2949 		xdp_features_clear_redirect_target(vsi->netdev);
2950 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2951 		if (xdp_ring_err)
2952 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2953 		/* reallocate Rx queues that were used for zero-copy */
2954 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2955 		if (xdp_ring_err)
2956 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2957 	} else {
2958 		/* safe to call even when prog == vsi->xdp_prog as
2959 		 * dev_xdp_install in net/core/dev.c incremented prog's
2960 		 * refcount so corresponding bpf_prog_put won't cause
2961 		 * underflow
2962 		 */
2963 		ice_vsi_assign_bpf_prog(vsi, prog);
2964 	}
2965 
2966 	if (if_running)
2967 		ret = ice_up(vsi);
2968 
2969 	if (!ret && prog)
2970 		ice_vsi_rx_napi_schedule(vsi);
2971 
2972 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2973 }
2974 
2975 /**
2976  * ice_xdp_safe_mode - XDP handler for safe mode
2977  * @dev: netdevice
2978  * @xdp: XDP command
2979  */
2980 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2981 			     struct netdev_bpf *xdp)
2982 {
2983 	NL_SET_ERR_MSG_MOD(xdp->extack,
2984 			   "Please provide working DDP firmware package in order to use XDP\n"
2985 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2986 	return -EOPNOTSUPP;
2987 }
2988 
2989 /**
2990  * ice_xdp - implements XDP handler
2991  * @dev: netdevice
2992  * @xdp: XDP command
2993  */
2994 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2995 {
2996 	struct ice_netdev_priv *np = netdev_priv(dev);
2997 	struct ice_vsi *vsi = np->vsi;
2998 
2999 	if (vsi->type != ICE_VSI_PF) {
3000 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3001 		return -EINVAL;
3002 	}
3003 
3004 	switch (xdp->command) {
3005 	case XDP_SETUP_PROG:
3006 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3007 	case XDP_SETUP_XSK_POOL:
3008 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3009 					  xdp->xsk.queue_id);
3010 	default:
3011 		return -EINVAL;
3012 	}
3013 }
3014 
3015 /**
3016  * ice_ena_misc_vector - enable the non-queue interrupts
3017  * @pf: board private structure
3018  */
3019 static void ice_ena_misc_vector(struct ice_pf *pf)
3020 {
3021 	struct ice_hw *hw = &pf->hw;
3022 	u32 val;
3023 
3024 	/* Disable anti-spoof detection interrupt to prevent spurious event
3025 	 * interrupts during a function reset. Anti-spoof functionally is
3026 	 * still supported.
3027 	 */
3028 	val = rd32(hw, GL_MDCK_TX_TDPU);
3029 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3030 	wr32(hw, GL_MDCK_TX_TDPU, val);
3031 
3032 	/* clear things first */
3033 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3034 	rd32(hw, PFINT_OICR);		/* read to clear */
3035 
3036 	val = (PFINT_OICR_ECC_ERR_M |
3037 	       PFINT_OICR_MAL_DETECT_M |
3038 	       PFINT_OICR_GRST_M |
3039 	       PFINT_OICR_PCI_EXCEPTION_M |
3040 	       PFINT_OICR_VFLR_M |
3041 	       PFINT_OICR_HMC_ERR_M |
3042 	       PFINT_OICR_PE_PUSH_M |
3043 	       PFINT_OICR_PE_CRITERR_M);
3044 
3045 	wr32(hw, PFINT_OICR_ENA, val);
3046 
3047 	/* SW_ITR_IDX = 0, but don't change INTENA */
3048 	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3049 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3050 }
3051 
3052 /**
3053  * ice_misc_intr - misc interrupt handler
3054  * @irq: interrupt number
3055  * @data: pointer to a q_vector
3056  */
3057 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3058 {
3059 	struct ice_pf *pf = (struct ice_pf *)data;
3060 	struct ice_hw *hw = &pf->hw;
3061 	struct device *dev;
3062 	u32 oicr, ena_mask;
3063 
3064 	dev = ice_pf_to_dev(pf);
3065 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3066 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3067 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3068 
3069 	oicr = rd32(hw, PFINT_OICR);
3070 	ena_mask = rd32(hw, PFINT_OICR_ENA);
3071 
3072 	if (oicr & PFINT_OICR_SWINT_M) {
3073 		ena_mask &= ~PFINT_OICR_SWINT_M;
3074 		pf->sw_int_count++;
3075 	}
3076 
3077 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3078 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3079 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3080 	}
3081 	if (oicr & PFINT_OICR_VFLR_M) {
3082 		/* disable any further VFLR event notifications */
3083 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3084 			u32 reg = rd32(hw, PFINT_OICR_ENA);
3085 
3086 			reg &= ~PFINT_OICR_VFLR_M;
3087 			wr32(hw, PFINT_OICR_ENA, reg);
3088 		} else {
3089 			ena_mask &= ~PFINT_OICR_VFLR_M;
3090 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3091 		}
3092 	}
3093 
3094 	if (oicr & PFINT_OICR_GRST_M) {
3095 		u32 reset;
3096 
3097 		/* we have a reset warning */
3098 		ena_mask &= ~PFINT_OICR_GRST_M;
3099 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3100 			GLGEN_RSTAT_RESET_TYPE_S;
3101 
3102 		if (reset == ICE_RESET_CORER)
3103 			pf->corer_count++;
3104 		else if (reset == ICE_RESET_GLOBR)
3105 			pf->globr_count++;
3106 		else if (reset == ICE_RESET_EMPR)
3107 			pf->empr_count++;
3108 		else
3109 			dev_dbg(dev, "Invalid reset type %d\n", reset);
3110 
3111 		/* If a reset cycle isn't already in progress, we set a bit in
3112 		 * pf->state so that the service task can start a reset/rebuild.
3113 		 */
3114 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3115 			if (reset == ICE_RESET_CORER)
3116 				set_bit(ICE_CORER_RECV, pf->state);
3117 			else if (reset == ICE_RESET_GLOBR)
3118 				set_bit(ICE_GLOBR_RECV, pf->state);
3119 			else
3120 				set_bit(ICE_EMPR_RECV, pf->state);
3121 
3122 			/* There are couple of different bits at play here.
3123 			 * hw->reset_ongoing indicates whether the hardware is
3124 			 * in reset. This is set to true when a reset interrupt
3125 			 * is received and set back to false after the driver
3126 			 * has determined that the hardware is out of reset.
3127 			 *
3128 			 * ICE_RESET_OICR_RECV in pf->state indicates
3129 			 * that a post reset rebuild is required before the
3130 			 * driver is operational again. This is set above.
3131 			 *
3132 			 * As this is the start of the reset/rebuild cycle, set
3133 			 * both to indicate that.
3134 			 */
3135 			hw->reset_ongoing = true;
3136 		}
3137 	}
3138 
3139 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3140 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3141 		if (!hw->reset_ongoing)
3142 			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3143 	}
3144 
3145 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3146 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3147 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3148 
3149 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3150 
3151 		if (hw->func_caps.ts_func_info.src_tmr_owned) {
3152 			/* Save EVENTs from GLTSYN register */
3153 			pf->ptp.ext_ts_irq |= gltsyn_stat &
3154 					      (GLTSYN_STAT_EVENT0_M |
3155 					       GLTSYN_STAT_EVENT1_M |
3156 					       GLTSYN_STAT_EVENT2_M);
3157 
3158 			set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
3159 		}
3160 	}
3161 
3162 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3163 	if (oicr & ICE_AUX_CRIT_ERR) {
3164 		pf->oicr_err_reg |= oicr;
3165 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3166 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3167 	}
3168 
3169 	/* Report any remaining unexpected interrupts */
3170 	oicr &= ena_mask;
3171 	if (oicr) {
3172 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3173 		/* If a critical error is pending there is no choice but to
3174 		 * reset the device.
3175 		 */
3176 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3177 			    PFINT_OICR_ECC_ERR_M)) {
3178 			set_bit(ICE_PFR_REQ, pf->state);
3179 		}
3180 	}
3181 
3182 	return IRQ_WAKE_THREAD;
3183 }
3184 
3185 /**
3186  * ice_misc_intr_thread_fn - misc interrupt thread function
3187  * @irq: interrupt number
3188  * @data: pointer to a q_vector
3189  */
3190 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3191 {
3192 	struct ice_pf *pf = data;
3193 	struct ice_hw *hw;
3194 
3195 	hw = &pf->hw;
3196 
3197 	if (ice_is_reset_in_progress(pf->state))
3198 		return IRQ_HANDLED;
3199 
3200 	ice_service_task_schedule(pf);
3201 
3202 	if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
3203 		ice_ptp_extts_event(pf);
3204 
3205 	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3206 		/* Process outstanding Tx timestamps. If there is more work,
3207 		 * re-arm the interrupt to trigger again.
3208 		 */
3209 		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3210 			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3211 			ice_flush(hw);
3212 		}
3213 	}
3214 
3215 	ice_irq_dynamic_ena(hw, NULL, NULL);
3216 
3217 	return IRQ_HANDLED;
3218 }
3219 
3220 /**
3221  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3222  * @hw: pointer to HW structure
3223  */
3224 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3225 {
3226 	/* disable Admin queue Interrupt causes */
3227 	wr32(hw, PFINT_FW_CTL,
3228 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3229 
3230 	/* disable Mailbox queue Interrupt causes */
3231 	wr32(hw, PFINT_MBX_CTL,
3232 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3233 
3234 	wr32(hw, PFINT_SB_CTL,
3235 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3236 
3237 	/* disable Control queue Interrupt causes */
3238 	wr32(hw, PFINT_OICR_CTL,
3239 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3240 
3241 	ice_flush(hw);
3242 }
3243 
3244 /**
3245  * ice_free_irq_msix_misc - Unroll misc vector setup
3246  * @pf: board private structure
3247  */
3248 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3249 {
3250 	int misc_irq_num = pf->oicr_irq.virq;
3251 	struct ice_hw *hw = &pf->hw;
3252 
3253 	ice_dis_ctrlq_interrupts(hw);
3254 
3255 	/* disable OICR interrupt */
3256 	wr32(hw, PFINT_OICR_ENA, 0);
3257 	ice_flush(hw);
3258 
3259 	synchronize_irq(misc_irq_num);
3260 	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3261 
3262 	ice_free_irq(pf, pf->oicr_irq);
3263 }
3264 
3265 /**
3266  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3267  * @hw: pointer to HW structure
3268  * @reg_idx: HW vector index to associate the control queue interrupts with
3269  */
3270 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3271 {
3272 	u32 val;
3273 
3274 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3275 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3276 	wr32(hw, PFINT_OICR_CTL, val);
3277 
3278 	/* enable Admin queue Interrupt causes */
3279 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3280 	       PFINT_FW_CTL_CAUSE_ENA_M);
3281 	wr32(hw, PFINT_FW_CTL, val);
3282 
3283 	/* enable Mailbox queue Interrupt causes */
3284 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3285 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3286 	wr32(hw, PFINT_MBX_CTL, val);
3287 
3288 	/* This enables Sideband queue Interrupt causes */
3289 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3290 	       PFINT_SB_CTL_CAUSE_ENA_M);
3291 	wr32(hw, PFINT_SB_CTL, val);
3292 
3293 	ice_flush(hw);
3294 }
3295 
3296 /**
3297  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3298  * @pf: board private structure
3299  *
3300  * This sets up the handler for MSIX 0, which is used to manage the
3301  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3302  * when in MSI or Legacy interrupt mode.
3303  */
3304 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3305 {
3306 	struct device *dev = ice_pf_to_dev(pf);
3307 	struct ice_hw *hw = &pf->hw;
3308 	struct msi_map oicr_irq;
3309 	int err = 0;
3310 
3311 	if (!pf->int_name[0])
3312 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3313 			 dev_driver_string(dev), dev_name(dev));
3314 
3315 	/* Do not request IRQ but do enable OICR interrupt since settings are
3316 	 * lost during reset. Note that this function is called only during
3317 	 * rebuild path and not while reset is in progress.
3318 	 */
3319 	if (ice_is_reset_in_progress(pf->state))
3320 		goto skip_req_irq;
3321 
3322 	/* reserve one vector in irq_tracker for misc interrupts */
3323 	oicr_irq = ice_alloc_irq(pf, false);
3324 	if (oicr_irq.index < 0)
3325 		return oicr_irq.index;
3326 
3327 	pf->oicr_irq = oicr_irq;
3328 	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3329 					ice_misc_intr_thread_fn, 0,
3330 					pf->int_name, pf);
3331 	if (err) {
3332 		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3333 			pf->int_name, err);
3334 		ice_free_irq(pf, pf->oicr_irq);
3335 		return err;
3336 	}
3337 
3338 skip_req_irq:
3339 	ice_ena_misc_vector(pf);
3340 
3341 	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3342 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3343 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3344 
3345 	ice_flush(hw);
3346 	ice_irq_dynamic_ena(hw, NULL, NULL);
3347 
3348 	return 0;
3349 }
3350 
3351 /**
3352  * ice_napi_add - register NAPI handler for the VSI
3353  * @vsi: VSI for which NAPI handler is to be registered
3354  *
3355  * This function is only called in the driver's load path. Registering the NAPI
3356  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3357  * reset/rebuild, etc.)
3358  */
3359 static void ice_napi_add(struct ice_vsi *vsi)
3360 {
3361 	int v_idx;
3362 
3363 	if (!vsi->netdev)
3364 		return;
3365 
3366 	ice_for_each_q_vector(vsi, v_idx)
3367 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3368 			       ice_napi_poll);
3369 }
3370 
3371 /**
3372  * ice_set_ops - set netdev and ethtools ops for the given netdev
3373  * @vsi: the VSI associated with the new netdev
3374  */
3375 static void ice_set_ops(struct ice_vsi *vsi)
3376 {
3377 	struct net_device *netdev = vsi->netdev;
3378 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3379 
3380 	if (ice_is_safe_mode(pf)) {
3381 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3382 		ice_set_ethtool_safe_mode_ops(netdev);
3383 		return;
3384 	}
3385 
3386 	netdev->netdev_ops = &ice_netdev_ops;
3387 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3388 	ice_set_ethtool_ops(netdev);
3389 
3390 	if (vsi->type != ICE_VSI_PF)
3391 		return;
3392 
3393 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3394 			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3395 			       NETDEV_XDP_ACT_RX_SG;
3396 }
3397 
3398 /**
3399  * ice_set_netdev_features - set features for the given netdev
3400  * @netdev: netdev instance
3401  */
3402 static void ice_set_netdev_features(struct net_device *netdev)
3403 {
3404 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3405 	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3406 	netdev_features_t csumo_features;
3407 	netdev_features_t vlano_features;
3408 	netdev_features_t dflt_features;
3409 	netdev_features_t tso_features;
3410 
3411 	if (ice_is_safe_mode(pf)) {
3412 		/* safe mode */
3413 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3414 		netdev->hw_features = netdev->features;
3415 		return;
3416 	}
3417 
3418 	dflt_features = NETIF_F_SG	|
3419 			NETIF_F_HIGHDMA	|
3420 			NETIF_F_NTUPLE	|
3421 			NETIF_F_RXHASH;
3422 
3423 	csumo_features = NETIF_F_RXCSUM	  |
3424 			 NETIF_F_IP_CSUM  |
3425 			 NETIF_F_SCTP_CRC |
3426 			 NETIF_F_IPV6_CSUM;
3427 
3428 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3429 			 NETIF_F_HW_VLAN_CTAG_TX     |
3430 			 NETIF_F_HW_VLAN_CTAG_RX;
3431 
3432 	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3433 	if (is_dvm_ena)
3434 		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3435 
3436 	tso_features = NETIF_F_TSO			|
3437 		       NETIF_F_TSO_ECN			|
3438 		       NETIF_F_TSO6			|
3439 		       NETIF_F_GSO_GRE			|
3440 		       NETIF_F_GSO_UDP_TUNNEL		|
3441 		       NETIF_F_GSO_GRE_CSUM		|
3442 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3443 		       NETIF_F_GSO_PARTIAL		|
3444 		       NETIF_F_GSO_IPXIP4		|
3445 		       NETIF_F_GSO_IPXIP6		|
3446 		       NETIF_F_GSO_UDP_L4;
3447 
3448 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3449 					NETIF_F_GSO_GRE_CSUM;
3450 	/* set features that user can change */
3451 	netdev->hw_features = dflt_features | csumo_features |
3452 			      vlano_features | tso_features;
3453 
3454 	/* add support for HW_CSUM on packets with MPLS header */
3455 	netdev->mpls_features =  NETIF_F_HW_CSUM |
3456 				 NETIF_F_TSO     |
3457 				 NETIF_F_TSO6;
3458 
3459 	/* enable features */
3460 	netdev->features |= netdev->hw_features;
3461 
3462 	netdev->hw_features |= NETIF_F_HW_TC;
3463 	netdev->hw_features |= NETIF_F_LOOPBACK;
3464 
3465 	/* encap and VLAN devices inherit default, csumo and tso features */
3466 	netdev->hw_enc_features |= dflt_features | csumo_features |
3467 				   tso_features;
3468 	netdev->vlan_features |= dflt_features | csumo_features |
3469 				 tso_features;
3470 
3471 	/* advertise support but don't enable by default since only one type of
3472 	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3473 	 * type turns on the other has to be turned off. This is enforced by the
3474 	 * ice_fix_features() ndo callback.
3475 	 */
3476 	if (is_dvm_ena)
3477 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3478 			NETIF_F_HW_VLAN_STAG_TX;
3479 
3480 	/* Leave CRC / FCS stripping enabled by default, but allow the value to
3481 	 * be changed at runtime
3482 	 */
3483 	netdev->hw_features |= NETIF_F_RXFCS;
3484 
3485 	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3486 }
3487 
3488 /**
3489  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3490  * @lut: Lookup table
3491  * @rss_table_size: Lookup table size
3492  * @rss_size: Range of queue number for hashing
3493  */
3494 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3495 {
3496 	u16 i;
3497 
3498 	for (i = 0; i < rss_table_size; i++)
3499 		lut[i] = i % rss_size;
3500 }
3501 
3502 /**
3503  * ice_pf_vsi_setup - Set up a PF VSI
3504  * @pf: board private structure
3505  * @pi: pointer to the port_info instance
3506  *
3507  * Returns pointer to the successfully allocated VSI software struct
3508  * on success, otherwise returns NULL on failure.
3509  */
3510 static struct ice_vsi *
3511 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3512 {
3513 	struct ice_vsi_cfg_params params = {};
3514 
3515 	params.type = ICE_VSI_PF;
3516 	params.pi = pi;
3517 	params.flags = ICE_VSI_FLAG_INIT;
3518 
3519 	return ice_vsi_setup(pf, &params);
3520 }
3521 
3522 static struct ice_vsi *
3523 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3524 		   struct ice_channel *ch)
3525 {
3526 	struct ice_vsi_cfg_params params = {};
3527 
3528 	params.type = ICE_VSI_CHNL;
3529 	params.pi = pi;
3530 	params.ch = ch;
3531 	params.flags = ICE_VSI_FLAG_INIT;
3532 
3533 	return ice_vsi_setup(pf, &params);
3534 }
3535 
3536 /**
3537  * ice_ctrl_vsi_setup - Set up a control VSI
3538  * @pf: board private structure
3539  * @pi: pointer to the port_info instance
3540  *
3541  * Returns pointer to the successfully allocated VSI software struct
3542  * on success, otherwise returns NULL on failure.
3543  */
3544 static struct ice_vsi *
3545 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3546 {
3547 	struct ice_vsi_cfg_params params = {};
3548 
3549 	params.type = ICE_VSI_CTRL;
3550 	params.pi = pi;
3551 	params.flags = ICE_VSI_FLAG_INIT;
3552 
3553 	return ice_vsi_setup(pf, &params);
3554 }
3555 
3556 /**
3557  * ice_lb_vsi_setup - Set up a loopback VSI
3558  * @pf: board private structure
3559  * @pi: pointer to the port_info instance
3560  *
3561  * Returns pointer to the successfully allocated VSI software struct
3562  * on success, otherwise returns NULL on failure.
3563  */
3564 struct ice_vsi *
3565 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3566 {
3567 	struct ice_vsi_cfg_params params = {};
3568 
3569 	params.type = ICE_VSI_LB;
3570 	params.pi = pi;
3571 	params.flags = ICE_VSI_FLAG_INIT;
3572 
3573 	return ice_vsi_setup(pf, &params);
3574 }
3575 
3576 /**
3577  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3578  * @netdev: network interface to be adjusted
3579  * @proto: VLAN TPID
3580  * @vid: VLAN ID to be added
3581  *
3582  * net_device_ops implementation for adding VLAN IDs
3583  */
3584 static int
3585 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3586 {
3587 	struct ice_netdev_priv *np = netdev_priv(netdev);
3588 	struct ice_vsi_vlan_ops *vlan_ops;
3589 	struct ice_vsi *vsi = np->vsi;
3590 	struct ice_vlan vlan;
3591 	int ret;
3592 
3593 	/* VLAN 0 is added by default during load/reset */
3594 	if (!vid)
3595 		return 0;
3596 
3597 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3598 		usleep_range(1000, 2000);
3599 
3600 	/* Add multicast promisc rule for the VLAN ID to be added if
3601 	 * all-multicast is currently enabled.
3602 	 */
3603 	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3604 		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3605 					       ICE_MCAST_VLAN_PROMISC_BITS,
3606 					       vid);
3607 		if (ret)
3608 			goto finish;
3609 	}
3610 
3611 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3612 
3613 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3614 	 * packets aren't pruned by the device's internal switch on Rx
3615 	 */
3616 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3617 	ret = vlan_ops->add_vlan(vsi, &vlan);
3618 	if (ret)
3619 		goto finish;
3620 
3621 	/* If all-multicast is currently enabled and this VLAN ID is only one
3622 	 * besides VLAN-0 we have to update look-up type of multicast promisc
3623 	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3624 	 */
3625 	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3626 	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3627 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3628 					   ICE_MCAST_PROMISC_BITS, 0);
3629 		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3630 					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3631 	}
3632 
3633 finish:
3634 	clear_bit(ICE_CFG_BUSY, vsi->state);
3635 
3636 	return ret;
3637 }
3638 
3639 /**
3640  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3641  * @netdev: network interface to be adjusted
3642  * @proto: VLAN TPID
3643  * @vid: VLAN ID to be removed
3644  *
3645  * net_device_ops implementation for removing VLAN IDs
3646  */
3647 static int
3648 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3649 {
3650 	struct ice_netdev_priv *np = netdev_priv(netdev);
3651 	struct ice_vsi_vlan_ops *vlan_ops;
3652 	struct ice_vsi *vsi = np->vsi;
3653 	struct ice_vlan vlan;
3654 	int ret;
3655 
3656 	/* don't allow removal of VLAN 0 */
3657 	if (!vid)
3658 		return 0;
3659 
3660 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3661 		usleep_range(1000, 2000);
3662 
3663 	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3664 				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3665 	if (ret) {
3666 		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3667 			   vsi->vsi_num);
3668 		vsi->current_netdev_flags |= IFF_ALLMULTI;
3669 	}
3670 
3671 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3672 
3673 	/* Make sure VLAN delete is successful before updating VLAN
3674 	 * information
3675 	 */
3676 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3677 	ret = vlan_ops->del_vlan(vsi, &vlan);
3678 	if (ret)
3679 		goto finish;
3680 
3681 	/* Remove multicast promisc rule for the removed VLAN ID if
3682 	 * all-multicast is enabled.
3683 	 */
3684 	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3685 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3686 					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3687 
3688 	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3689 		/* Update look-up type of multicast promisc rule for VLAN 0
3690 		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3691 		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3692 		 */
3693 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3694 			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3695 						   ICE_MCAST_VLAN_PROMISC_BITS,
3696 						   0);
3697 			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3698 						 ICE_MCAST_PROMISC_BITS, 0);
3699 		}
3700 	}
3701 
3702 finish:
3703 	clear_bit(ICE_CFG_BUSY, vsi->state);
3704 
3705 	return ret;
3706 }
3707 
3708 /**
3709  * ice_rep_indr_tc_block_unbind
3710  * @cb_priv: indirection block private data
3711  */
3712 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3713 {
3714 	struct ice_indr_block_priv *indr_priv = cb_priv;
3715 
3716 	list_del(&indr_priv->list);
3717 	kfree(indr_priv);
3718 }
3719 
3720 /**
3721  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3722  * @vsi: VSI struct which has the netdev
3723  */
3724 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3725 {
3726 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3727 
3728 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3729 				 ice_rep_indr_tc_block_unbind);
3730 }
3731 
3732 /**
3733  * ice_tc_indir_block_register - Register TC indirect block notifications
3734  * @vsi: VSI struct which has the netdev
3735  *
3736  * Returns 0 on success, negative value on failure
3737  */
3738 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3739 {
3740 	struct ice_netdev_priv *np;
3741 
3742 	if (!vsi || !vsi->netdev)
3743 		return -EINVAL;
3744 
3745 	np = netdev_priv(vsi->netdev);
3746 
3747 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3748 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3749 }
3750 
3751 /**
3752  * ice_get_avail_q_count - Get count of queues in use
3753  * @pf_qmap: bitmap to get queue use count from
3754  * @lock: pointer to a mutex that protects access to pf_qmap
3755  * @size: size of the bitmap
3756  */
3757 static u16
3758 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3759 {
3760 	unsigned long bit;
3761 	u16 count = 0;
3762 
3763 	mutex_lock(lock);
3764 	for_each_clear_bit(bit, pf_qmap, size)
3765 		count++;
3766 	mutex_unlock(lock);
3767 
3768 	return count;
3769 }
3770 
3771 /**
3772  * ice_get_avail_txq_count - Get count of Tx queues in use
3773  * @pf: pointer to an ice_pf instance
3774  */
3775 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3776 {
3777 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3778 				     pf->max_pf_txqs);
3779 }
3780 
3781 /**
3782  * ice_get_avail_rxq_count - Get count of Rx queues in use
3783  * @pf: pointer to an ice_pf instance
3784  */
3785 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3786 {
3787 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3788 				     pf->max_pf_rxqs);
3789 }
3790 
3791 /**
3792  * ice_deinit_pf - Unrolls initialziations done by ice_init_pf
3793  * @pf: board private structure to initialize
3794  */
3795 static void ice_deinit_pf(struct ice_pf *pf)
3796 {
3797 	ice_service_task_stop(pf);
3798 	mutex_destroy(&pf->adev_mutex);
3799 	mutex_destroy(&pf->sw_mutex);
3800 	mutex_destroy(&pf->tc_mutex);
3801 	mutex_destroy(&pf->avail_q_mutex);
3802 	mutex_destroy(&pf->vfs.table_lock);
3803 
3804 	if (pf->avail_txqs) {
3805 		bitmap_free(pf->avail_txqs);
3806 		pf->avail_txqs = NULL;
3807 	}
3808 
3809 	if (pf->avail_rxqs) {
3810 		bitmap_free(pf->avail_rxqs);
3811 		pf->avail_rxqs = NULL;
3812 	}
3813 
3814 	if (pf->ptp.clock)
3815 		ptp_clock_unregister(pf->ptp.clock);
3816 }
3817 
3818 /**
3819  * ice_set_pf_caps - set PFs capability flags
3820  * @pf: pointer to the PF instance
3821  */
3822 static void ice_set_pf_caps(struct ice_pf *pf)
3823 {
3824 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3825 
3826 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3827 	if (func_caps->common_cap.rdma)
3828 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3829 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3830 	if (func_caps->common_cap.dcb)
3831 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3832 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3833 	if (func_caps->common_cap.sr_iov_1_1) {
3834 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3835 		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3836 					      ICE_MAX_SRIOV_VFS);
3837 	}
3838 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3839 	if (func_caps->common_cap.rss_table_size)
3840 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3841 
3842 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3843 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3844 		u16 unused;
3845 
3846 		/* ctrl_vsi_idx will be set to a valid value when flow director
3847 		 * is setup by ice_init_fdir
3848 		 */
3849 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3850 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3851 		/* force guaranteed filter pool for PF */
3852 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3853 				       func_caps->fd_fltr_guar);
3854 		/* force shared filter pool for PF */
3855 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3856 				       func_caps->fd_fltr_best_effort);
3857 	}
3858 
3859 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3860 	if (func_caps->common_cap.ieee_1588)
3861 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3862 
3863 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3864 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3865 }
3866 
3867 /**
3868  * ice_init_pf - Initialize general software structures (struct ice_pf)
3869  * @pf: board private structure to initialize
3870  */
3871 static int ice_init_pf(struct ice_pf *pf)
3872 {
3873 	ice_set_pf_caps(pf);
3874 
3875 	mutex_init(&pf->sw_mutex);
3876 	mutex_init(&pf->tc_mutex);
3877 	mutex_init(&pf->adev_mutex);
3878 
3879 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3880 	spin_lock_init(&pf->aq_wait_lock);
3881 	init_waitqueue_head(&pf->aq_wait_queue);
3882 
3883 	init_waitqueue_head(&pf->reset_wait_queue);
3884 
3885 	/* setup service timer and periodic service task */
3886 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3887 	pf->serv_tmr_period = HZ;
3888 	INIT_WORK(&pf->serv_task, ice_service_task);
3889 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3890 
3891 	mutex_init(&pf->avail_q_mutex);
3892 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3893 	if (!pf->avail_txqs)
3894 		return -ENOMEM;
3895 
3896 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3897 	if (!pf->avail_rxqs) {
3898 		bitmap_free(pf->avail_txqs);
3899 		pf->avail_txqs = NULL;
3900 		return -ENOMEM;
3901 	}
3902 
3903 	mutex_init(&pf->vfs.table_lock);
3904 	hash_init(pf->vfs.table);
3905 	ice_mbx_init_snapshot(&pf->hw);
3906 
3907 	return 0;
3908 }
3909 
3910 /**
3911  * ice_is_wol_supported - check if WoL is supported
3912  * @hw: pointer to hardware info
3913  *
3914  * Check if WoL is supported based on the HW configuration.
3915  * Returns true if NVM supports and enables WoL for this port, false otherwise
3916  */
3917 bool ice_is_wol_supported(struct ice_hw *hw)
3918 {
3919 	u16 wol_ctrl;
3920 
3921 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3922 	 * word) indicates WoL is not supported on the corresponding PF ID.
3923 	 */
3924 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3925 		return false;
3926 
3927 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3928 }
3929 
3930 /**
3931  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3932  * @vsi: VSI being changed
3933  * @new_rx: new number of Rx queues
3934  * @new_tx: new number of Tx queues
3935  * @locked: is adev device_lock held
3936  *
3937  * Only change the number of queues if new_tx, or new_rx is non-0.
3938  *
3939  * Returns 0 on success.
3940  */
3941 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
3942 {
3943 	struct ice_pf *pf = vsi->back;
3944 	int err = 0, timeout = 50;
3945 
3946 	if (!new_rx && !new_tx)
3947 		return -EINVAL;
3948 
3949 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3950 		timeout--;
3951 		if (!timeout)
3952 			return -EBUSY;
3953 		usleep_range(1000, 2000);
3954 	}
3955 
3956 	if (new_tx)
3957 		vsi->req_txq = (u16)new_tx;
3958 	if (new_rx)
3959 		vsi->req_rxq = (u16)new_rx;
3960 
3961 	/* set for the next time the netdev is started */
3962 	if (!netif_running(vsi->netdev)) {
3963 		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3964 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3965 		goto done;
3966 	}
3967 
3968 	ice_vsi_close(vsi);
3969 	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3970 	ice_pf_dcb_recfg(pf, locked);
3971 	ice_vsi_open(vsi);
3972 done:
3973 	clear_bit(ICE_CFG_BUSY, pf->state);
3974 	return err;
3975 }
3976 
3977 /**
3978  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3979  * @pf: PF to configure
3980  *
3981  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3982  * VSI can still Tx/Rx VLAN tagged packets.
3983  */
3984 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3985 {
3986 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3987 	struct ice_vsi_ctx *ctxt;
3988 	struct ice_hw *hw;
3989 	int status;
3990 
3991 	if (!vsi)
3992 		return;
3993 
3994 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3995 	if (!ctxt)
3996 		return;
3997 
3998 	hw = &pf->hw;
3999 	ctxt->info = vsi->info;
4000 
4001 	ctxt->info.valid_sections =
4002 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4003 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4004 			    ICE_AQ_VSI_PROP_SW_VALID);
4005 
4006 	/* disable VLAN anti-spoof */
4007 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4008 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4009 
4010 	/* disable VLAN pruning and keep all other settings */
4011 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4012 
4013 	/* allow all VLANs on Tx and don't strip on Rx */
4014 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4015 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4016 
4017 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4018 	if (status) {
4019 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4020 			status, ice_aq_str(hw->adminq.sq_last_status));
4021 	} else {
4022 		vsi->info.sec_flags = ctxt->info.sec_flags;
4023 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4024 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4025 	}
4026 
4027 	kfree(ctxt);
4028 }
4029 
4030 /**
4031  * ice_log_pkg_init - log result of DDP package load
4032  * @hw: pointer to hardware info
4033  * @state: state of package load
4034  */
4035 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4036 {
4037 	struct ice_pf *pf = hw->back;
4038 	struct device *dev;
4039 
4040 	dev = ice_pf_to_dev(pf);
4041 
4042 	switch (state) {
4043 	case ICE_DDP_PKG_SUCCESS:
4044 		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4045 			 hw->active_pkg_name,
4046 			 hw->active_pkg_ver.major,
4047 			 hw->active_pkg_ver.minor,
4048 			 hw->active_pkg_ver.update,
4049 			 hw->active_pkg_ver.draft);
4050 		break;
4051 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4052 		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4053 			 hw->active_pkg_name,
4054 			 hw->active_pkg_ver.major,
4055 			 hw->active_pkg_ver.minor,
4056 			 hw->active_pkg_ver.update,
4057 			 hw->active_pkg_ver.draft);
4058 		break;
4059 	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4060 		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4061 			hw->active_pkg_name,
4062 			hw->active_pkg_ver.major,
4063 			hw->active_pkg_ver.minor,
4064 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4065 		break;
4066 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4067 		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4068 			 hw->active_pkg_name,
4069 			 hw->active_pkg_ver.major,
4070 			 hw->active_pkg_ver.minor,
4071 			 hw->active_pkg_ver.update,
4072 			 hw->active_pkg_ver.draft,
4073 			 hw->pkg_name,
4074 			 hw->pkg_ver.major,
4075 			 hw->pkg_ver.minor,
4076 			 hw->pkg_ver.update,
4077 			 hw->pkg_ver.draft);
4078 		break;
4079 	case ICE_DDP_PKG_FW_MISMATCH:
4080 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4081 		break;
4082 	case ICE_DDP_PKG_INVALID_FILE:
4083 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4084 		break;
4085 	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4086 		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4087 		break;
4088 	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4089 		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4090 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4091 		break;
4092 	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4093 		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4094 		break;
4095 	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4096 		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4097 		break;
4098 	case ICE_DDP_PKG_LOAD_ERROR:
4099 		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4100 		/* poll for reset to complete */
4101 		if (ice_check_reset(hw))
4102 			dev_err(dev, "Error resetting device. Please reload the driver\n");
4103 		break;
4104 	case ICE_DDP_PKG_ERR:
4105 	default:
4106 		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4107 		break;
4108 	}
4109 }
4110 
4111 /**
4112  * ice_load_pkg - load/reload the DDP Package file
4113  * @firmware: firmware structure when firmware requested or NULL for reload
4114  * @pf: pointer to the PF instance
4115  *
4116  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4117  * initialize HW tables.
4118  */
4119 static void
4120 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4121 {
4122 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4123 	struct device *dev = ice_pf_to_dev(pf);
4124 	struct ice_hw *hw = &pf->hw;
4125 
4126 	/* Load DDP Package */
4127 	if (firmware && !hw->pkg_copy) {
4128 		state = ice_copy_and_init_pkg(hw, firmware->data,
4129 					      firmware->size);
4130 		ice_log_pkg_init(hw, state);
4131 	} else if (!firmware && hw->pkg_copy) {
4132 		/* Reload package during rebuild after CORER/GLOBR reset */
4133 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4134 		ice_log_pkg_init(hw, state);
4135 	} else {
4136 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4137 	}
4138 
4139 	if (!ice_is_init_pkg_successful(state)) {
4140 		/* Safe Mode */
4141 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4142 		return;
4143 	}
4144 
4145 	/* Successful download package is the precondition for advanced
4146 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4147 	 */
4148 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4149 }
4150 
4151 /**
4152  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4153  * @pf: pointer to the PF structure
4154  *
4155  * There is no error returned here because the driver should be able to handle
4156  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4157  * specifically with Tx.
4158  */
4159 static void ice_verify_cacheline_size(struct ice_pf *pf)
4160 {
4161 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4162 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4163 			 ICE_CACHE_LINE_BYTES);
4164 }
4165 
4166 /**
4167  * ice_send_version - update firmware with driver version
4168  * @pf: PF struct
4169  *
4170  * Returns 0 on success, else error code
4171  */
4172 static int ice_send_version(struct ice_pf *pf)
4173 {
4174 	struct ice_driver_ver dv;
4175 
4176 	dv.major_ver = 0xff;
4177 	dv.minor_ver = 0xff;
4178 	dv.build_ver = 0xff;
4179 	dv.subbuild_ver = 0;
4180 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4181 		sizeof(dv.driver_string));
4182 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4183 }
4184 
4185 /**
4186  * ice_init_fdir - Initialize flow director VSI and configuration
4187  * @pf: pointer to the PF instance
4188  *
4189  * returns 0 on success, negative on error
4190  */
4191 static int ice_init_fdir(struct ice_pf *pf)
4192 {
4193 	struct device *dev = ice_pf_to_dev(pf);
4194 	struct ice_vsi *ctrl_vsi;
4195 	int err;
4196 
4197 	/* Side Band Flow Director needs to have a control VSI.
4198 	 * Allocate it and store it in the PF.
4199 	 */
4200 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4201 	if (!ctrl_vsi) {
4202 		dev_dbg(dev, "could not create control VSI\n");
4203 		return -ENOMEM;
4204 	}
4205 
4206 	err = ice_vsi_open_ctrl(ctrl_vsi);
4207 	if (err) {
4208 		dev_dbg(dev, "could not open control VSI\n");
4209 		goto err_vsi_open;
4210 	}
4211 
4212 	mutex_init(&pf->hw.fdir_fltr_lock);
4213 
4214 	err = ice_fdir_create_dflt_rules(pf);
4215 	if (err)
4216 		goto err_fdir_rule;
4217 
4218 	return 0;
4219 
4220 err_fdir_rule:
4221 	ice_fdir_release_flows(&pf->hw);
4222 	ice_vsi_close(ctrl_vsi);
4223 err_vsi_open:
4224 	ice_vsi_release(ctrl_vsi);
4225 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4226 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4227 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4228 	}
4229 	return err;
4230 }
4231 
4232 static void ice_deinit_fdir(struct ice_pf *pf)
4233 {
4234 	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4235 
4236 	if (!vsi)
4237 		return;
4238 
4239 	ice_vsi_manage_fdir(vsi, false);
4240 	ice_vsi_release(vsi);
4241 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4242 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4243 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4244 	}
4245 
4246 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
4247 }
4248 
4249 /**
4250  * ice_get_opt_fw_name - return optional firmware file name or NULL
4251  * @pf: pointer to the PF instance
4252  */
4253 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4254 {
4255 	/* Optional firmware name same as default with additional dash
4256 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
4257 	 */
4258 	struct pci_dev *pdev = pf->pdev;
4259 	char *opt_fw_filename;
4260 	u64 dsn;
4261 
4262 	/* Determine the name of the optional file using the DSN (two
4263 	 * dwords following the start of the DSN Capability).
4264 	 */
4265 	dsn = pci_get_dsn(pdev);
4266 	if (!dsn)
4267 		return NULL;
4268 
4269 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4270 	if (!opt_fw_filename)
4271 		return NULL;
4272 
4273 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4274 		 ICE_DDP_PKG_PATH, dsn);
4275 
4276 	return opt_fw_filename;
4277 }
4278 
4279 /**
4280  * ice_request_fw - Device initialization routine
4281  * @pf: pointer to the PF instance
4282  */
4283 static void ice_request_fw(struct ice_pf *pf)
4284 {
4285 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4286 	const struct firmware *firmware = NULL;
4287 	struct device *dev = ice_pf_to_dev(pf);
4288 	int err = 0;
4289 
4290 	/* optional device-specific DDP (if present) overrides the default DDP
4291 	 * package file. kernel logs a debug message if the file doesn't exist,
4292 	 * and warning messages for other errors.
4293 	 */
4294 	if (opt_fw_filename) {
4295 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4296 		if (err) {
4297 			kfree(opt_fw_filename);
4298 			goto dflt_pkg_load;
4299 		}
4300 
4301 		/* request for firmware was successful. Download to device */
4302 		ice_load_pkg(firmware, pf);
4303 		kfree(opt_fw_filename);
4304 		release_firmware(firmware);
4305 		return;
4306 	}
4307 
4308 dflt_pkg_load:
4309 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4310 	if (err) {
4311 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4312 		return;
4313 	}
4314 
4315 	/* request for firmware was successful. Download to device */
4316 	ice_load_pkg(firmware, pf);
4317 	release_firmware(firmware);
4318 }
4319 
4320 /**
4321  * ice_print_wake_reason - show the wake up cause in the log
4322  * @pf: pointer to the PF struct
4323  */
4324 static void ice_print_wake_reason(struct ice_pf *pf)
4325 {
4326 	u32 wus = pf->wakeup_reason;
4327 	const char *wake_str;
4328 
4329 	/* if no wake event, nothing to print */
4330 	if (!wus)
4331 		return;
4332 
4333 	if (wus & PFPM_WUS_LNKC_M)
4334 		wake_str = "Link\n";
4335 	else if (wus & PFPM_WUS_MAG_M)
4336 		wake_str = "Magic Packet\n";
4337 	else if (wus & PFPM_WUS_MNG_M)
4338 		wake_str = "Management\n";
4339 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4340 		wake_str = "Firmware Reset\n";
4341 	else
4342 		wake_str = "Unknown\n";
4343 
4344 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4345 }
4346 
4347 /**
4348  * ice_register_netdev - register netdev
4349  * @vsi: pointer to the VSI struct
4350  */
4351 static int ice_register_netdev(struct ice_vsi *vsi)
4352 {
4353 	int err;
4354 
4355 	if (!vsi || !vsi->netdev)
4356 		return -EIO;
4357 
4358 	err = register_netdev(vsi->netdev);
4359 	if (err)
4360 		return err;
4361 
4362 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4363 	netif_carrier_off(vsi->netdev);
4364 	netif_tx_stop_all_queues(vsi->netdev);
4365 
4366 	return 0;
4367 }
4368 
4369 static void ice_unregister_netdev(struct ice_vsi *vsi)
4370 {
4371 	if (!vsi || !vsi->netdev)
4372 		return;
4373 
4374 	unregister_netdev(vsi->netdev);
4375 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4376 }
4377 
4378 /**
4379  * ice_cfg_netdev - Allocate, configure and register a netdev
4380  * @vsi: the VSI associated with the new netdev
4381  *
4382  * Returns 0 on success, negative value on failure
4383  */
4384 static int ice_cfg_netdev(struct ice_vsi *vsi)
4385 {
4386 	struct ice_netdev_priv *np;
4387 	struct net_device *netdev;
4388 	u8 mac_addr[ETH_ALEN];
4389 
4390 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4391 				    vsi->alloc_rxq);
4392 	if (!netdev)
4393 		return -ENOMEM;
4394 
4395 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4396 	vsi->netdev = netdev;
4397 	np = netdev_priv(netdev);
4398 	np->vsi = vsi;
4399 
4400 	ice_set_netdev_features(netdev);
4401 	ice_set_ops(vsi);
4402 
4403 	if (vsi->type == ICE_VSI_PF) {
4404 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4405 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4406 		eth_hw_addr_set(netdev, mac_addr);
4407 	}
4408 
4409 	netdev->priv_flags |= IFF_UNICAST_FLT;
4410 
4411 	/* Setup netdev TC information */
4412 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4413 
4414 	netdev->max_mtu = ICE_MAX_MTU;
4415 
4416 	return 0;
4417 }
4418 
4419 static void ice_decfg_netdev(struct ice_vsi *vsi)
4420 {
4421 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4422 	free_netdev(vsi->netdev);
4423 	vsi->netdev = NULL;
4424 }
4425 
4426 static int ice_start_eth(struct ice_vsi *vsi)
4427 {
4428 	int err;
4429 
4430 	err = ice_init_mac_fltr(vsi->back);
4431 	if (err)
4432 		return err;
4433 
4434 	rtnl_lock();
4435 	err = ice_vsi_open(vsi);
4436 	rtnl_unlock();
4437 
4438 	return err;
4439 }
4440 
4441 static void ice_stop_eth(struct ice_vsi *vsi)
4442 {
4443 	ice_fltr_remove_all(vsi);
4444 	ice_vsi_close(vsi);
4445 }
4446 
4447 static int ice_init_eth(struct ice_pf *pf)
4448 {
4449 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4450 	int err;
4451 
4452 	if (!vsi)
4453 		return -EINVAL;
4454 
4455 	/* init channel list */
4456 	INIT_LIST_HEAD(&vsi->ch_list);
4457 
4458 	err = ice_cfg_netdev(vsi);
4459 	if (err)
4460 		return err;
4461 	/* Setup DCB netlink interface */
4462 	ice_dcbnl_setup(vsi);
4463 
4464 	err = ice_init_mac_fltr(pf);
4465 	if (err)
4466 		goto err_init_mac_fltr;
4467 
4468 	err = ice_devlink_create_pf_port(pf);
4469 	if (err)
4470 		goto err_devlink_create_pf_port;
4471 
4472 	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4473 
4474 	err = ice_register_netdev(vsi);
4475 	if (err)
4476 		goto err_register_netdev;
4477 
4478 	err = ice_tc_indir_block_register(vsi);
4479 	if (err)
4480 		goto err_tc_indir_block_register;
4481 
4482 	ice_napi_add(vsi);
4483 
4484 	return 0;
4485 
4486 err_tc_indir_block_register:
4487 	ice_unregister_netdev(vsi);
4488 err_register_netdev:
4489 	ice_devlink_destroy_pf_port(pf);
4490 err_devlink_create_pf_port:
4491 err_init_mac_fltr:
4492 	ice_decfg_netdev(vsi);
4493 	return err;
4494 }
4495 
4496 static void ice_deinit_eth(struct ice_pf *pf)
4497 {
4498 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4499 
4500 	if (!vsi)
4501 		return;
4502 
4503 	ice_vsi_close(vsi);
4504 	ice_unregister_netdev(vsi);
4505 	ice_devlink_destroy_pf_port(pf);
4506 	ice_tc_indir_block_unregister(vsi);
4507 	ice_decfg_netdev(vsi);
4508 }
4509 
4510 static int ice_init_dev(struct ice_pf *pf)
4511 {
4512 	struct device *dev = ice_pf_to_dev(pf);
4513 	struct ice_hw *hw = &pf->hw;
4514 	int err;
4515 
4516 	err = ice_init_hw(hw);
4517 	if (err) {
4518 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4519 		return err;
4520 	}
4521 
4522 	ice_init_feature_support(pf);
4523 
4524 	ice_request_fw(pf);
4525 
4526 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4527 	 * set in pf->state, which will cause ice_is_safe_mode to return
4528 	 * true
4529 	 */
4530 	if (ice_is_safe_mode(pf)) {
4531 		/* we already got function/device capabilities but these don't
4532 		 * reflect what the driver needs to do in safe mode. Instead of
4533 		 * adding conditional logic everywhere to ignore these
4534 		 * device/function capabilities, override them.
4535 		 */
4536 		ice_set_safe_mode_caps(hw);
4537 	}
4538 
4539 	err = ice_init_pf(pf);
4540 	if (err) {
4541 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4542 		goto err_init_pf;
4543 	}
4544 
4545 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4546 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4547 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4548 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4549 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4550 		pf->hw.udp_tunnel_nic.tables[0].n_entries =
4551 			pf->hw.tnl.valid_count[TNL_VXLAN];
4552 		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4553 			UDP_TUNNEL_TYPE_VXLAN;
4554 	}
4555 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4556 		pf->hw.udp_tunnel_nic.tables[1].n_entries =
4557 			pf->hw.tnl.valid_count[TNL_GENEVE];
4558 		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4559 			UDP_TUNNEL_TYPE_GENEVE;
4560 	}
4561 
4562 	err = ice_init_interrupt_scheme(pf);
4563 	if (err) {
4564 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4565 		err = -EIO;
4566 		goto err_init_interrupt_scheme;
4567 	}
4568 
4569 	/* In case of MSIX we are going to setup the misc vector right here
4570 	 * to handle admin queue events etc. In case of legacy and MSI
4571 	 * the misc functionality and queue processing is combined in
4572 	 * the same vector and that gets setup at open.
4573 	 */
4574 	err = ice_req_irq_msix_misc(pf);
4575 	if (err) {
4576 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4577 		goto err_req_irq_msix_misc;
4578 	}
4579 
4580 	return 0;
4581 
4582 err_req_irq_msix_misc:
4583 	ice_clear_interrupt_scheme(pf);
4584 err_init_interrupt_scheme:
4585 	ice_deinit_pf(pf);
4586 err_init_pf:
4587 	ice_deinit_hw(hw);
4588 	return err;
4589 }
4590 
4591 static void ice_deinit_dev(struct ice_pf *pf)
4592 {
4593 	ice_free_irq_msix_misc(pf);
4594 	ice_deinit_pf(pf);
4595 	ice_deinit_hw(&pf->hw);
4596 
4597 	/* Service task is already stopped, so call reset directly. */
4598 	ice_reset(&pf->hw, ICE_RESET_PFR);
4599 	pci_wait_for_pending_transaction(pf->pdev);
4600 	ice_clear_interrupt_scheme(pf);
4601 }
4602 
4603 static void ice_init_features(struct ice_pf *pf)
4604 {
4605 	struct device *dev = ice_pf_to_dev(pf);
4606 
4607 	if (ice_is_safe_mode(pf))
4608 		return;
4609 
4610 	/* initialize DDP driven features */
4611 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4612 		ice_ptp_init(pf);
4613 
4614 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4615 		ice_gnss_init(pf);
4616 
4617 	/* Note: Flow director init failure is non-fatal to load */
4618 	if (ice_init_fdir(pf))
4619 		dev_err(dev, "could not initialize flow director\n");
4620 
4621 	/* Note: DCB init failure is non-fatal to load */
4622 	if (ice_init_pf_dcb(pf, false)) {
4623 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4624 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4625 	} else {
4626 		ice_cfg_lldp_mib_change(&pf->hw, true);
4627 	}
4628 
4629 	if (ice_init_lag(pf))
4630 		dev_warn(dev, "Failed to init link aggregation support\n");
4631 }
4632 
4633 static void ice_deinit_features(struct ice_pf *pf)
4634 {
4635 	ice_deinit_lag(pf);
4636 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4637 		ice_cfg_lldp_mib_change(&pf->hw, false);
4638 	ice_deinit_fdir(pf);
4639 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4640 		ice_gnss_exit(pf);
4641 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4642 		ice_ptp_release(pf);
4643 }
4644 
4645 static void ice_init_wakeup(struct ice_pf *pf)
4646 {
4647 	/* Save wakeup reason register for later use */
4648 	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4649 
4650 	/* check for a power management event */
4651 	ice_print_wake_reason(pf);
4652 
4653 	/* clear wake status, all bits */
4654 	wr32(&pf->hw, PFPM_WUS, U32_MAX);
4655 
4656 	/* Disable WoL at init, wait for user to enable */
4657 	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4658 }
4659 
4660 static int ice_init_link(struct ice_pf *pf)
4661 {
4662 	struct device *dev = ice_pf_to_dev(pf);
4663 	int err;
4664 
4665 	err = ice_init_link_events(pf->hw.port_info);
4666 	if (err) {
4667 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4668 		return err;
4669 	}
4670 
4671 	/* not a fatal error if this fails */
4672 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4673 	if (err)
4674 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4675 
4676 	/* not a fatal error if this fails */
4677 	err = ice_update_link_info(pf->hw.port_info);
4678 	if (err)
4679 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4680 
4681 	ice_init_link_dflt_override(pf->hw.port_info);
4682 
4683 	ice_check_link_cfg_err(pf,
4684 			       pf->hw.port_info->phy.link_info.link_cfg_err);
4685 
4686 	/* if media available, initialize PHY settings */
4687 	if (pf->hw.port_info->phy.link_info.link_info &
4688 	    ICE_AQ_MEDIA_AVAILABLE) {
4689 		/* not a fatal error if this fails */
4690 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4691 		if (err)
4692 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4693 
4694 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4695 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4696 
4697 			if (vsi)
4698 				ice_configure_phy(vsi);
4699 		}
4700 	} else {
4701 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4702 	}
4703 
4704 	return err;
4705 }
4706 
4707 static int ice_init_pf_sw(struct ice_pf *pf)
4708 {
4709 	bool dvm = ice_is_dvm_ena(&pf->hw);
4710 	struct ice_vsi *vsi;
4711 	int err;
4712 
4713 	/* create switch struct for the switch element created by FW on boot */
4714 	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4715 	if (!pf->first_sw)
4716 		return -ENOMEM;
4717 
4718 	if (pf->hw.evb_veb)
4719 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4720 	else
4721 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4722 
4723 	pf->first_sw->pf = pf;
4724 
4725 	/* record the sw_id available for later use */
4726 	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4727 
4728 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4729 	if (err)
4730 		goto err_aq_set_port_params;
4731 
4732 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4733 	if (!vsi) {
4734 		err = -ENOMEM;
4735 		goto err_pf_vsi_setup;
4736 	}
4737 
4738 	return 0;
4739 
4740 err_pf_vsi_setup:
4741 err_aq_set_port_params:
4742 	kfree(pf->first_sw);
4743 	return err;
4744 }
4745 
4746 static void ice_deinit_pf_sw(struct ice_pf *pf)
4747 {
4748 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4749 
4750 	if (!vsi)
4751 		return;
4752 
4753 	ice_vsi_release(vsi);
4754 	kfree(pf->first_sw);
4755 }
4756 
4757 static int ice_alloc_vsis(struct ice_pf *pf)
4758 {
4759 	struct device *dev = ice_pf_to_dev(pf);
4760 
4761 	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4762 	if (!pf->num_alloc_vsi)
4763 		return -EIO;
4764 
4765 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4766 		dev_warn(dev,
4767 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4768 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4769 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4770 	}
4771 
4772 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4773 			       GFP_KERNEL);
4774 	if (!pf->vsi)
4775 		return -ENOMEM;
4776 
4777 	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4778 				     sizeof(*pf->vsi_stats), GFP_KERNEL);
4779 	if (!pf->vsi_stats) {
4780 		devm_kfree(dev, pf->vsi);
4781 		return -ENOMEM;
4782 	}
4783 
4784 	return 0;
4785 }
4786 
4787 static void ice_dealloc_vsis(struct ice_pf *pf)
4788 {
4789 	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4790 	pf->vsi_stats = NULL;
4791 
4792 	pf->num_alloc_vsi = 0;
4793 	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4794 	pf->vsi = NULL;
4795 }
4796 
4797 static int ice_init_devlink(struct ice_pf *pf)
4798 {
4799 	int err;
4800 
4801 	err = ice_devlink_register_params(pf);
4802 	if (err)
4803 		return err;
4804 
4805 	ice_devlink_init_regions(pf);
4806 	ice_devlink_register(pf);
4807 
4808 	return 0;
4809 }
4810 
4811 static void ice_deinit_devlink(struct ice_pf *pf)
4812 {
4813 	ice_devlink_unregister(pf);
4814 	ice_devlink_destroy_regions(pf);
4815 	ice_devlink_unregister_params(pf);
4816 }
4817 
4818 static int ice_init(struct ice_pf *pf)
4819 {
4820 	int err;
4821 
4822 	err = ice_init_dev(pf);
4823 	if (err)
4824 		return err;
4825 
4826 	err = ice_alloc_vsis(pf);
4827 	if (err)
4828 		goto err_alloc_vsis;
4829 
4830 	err = ice_init_pf_sw(pf);
4831 	if (err)
4832 		goto err_init_pf_sw;
4833 
4834 	ice_init_wakeup(pf);
4835 
4836 	err = ice_init_link(pf);
4837 	if (err)
4838 		goto err_init_link;
4839 
4840 	err = ice_send_version(pf);
4841 	if (err)
4842 		goto err_init_link;
4843 
4844 	ice_verify_cacheline_size(pf);
4845 
4846 	if (ice_is_safe_mode(pf))
4847 		ice_set_safe_mode_vlan_cfg(pf);
4848 	else
4849 		/* print PCI link speed and width */
4850 		pcie_print_link_status(pf->pdev);
4851 
4852 	/* ready to go, so clear down state bit */
4853 	clear_bit(ICE_DOWN, pf->state);
4854 	clear_bit(ICE_SERVICE_DIS, pf->state);
4855 
4856 	/* since everything is good, start the service timer */
4857 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4858 
4859 	return 0;
4860 
4861 err_init_link:
4862 	ice_deinit_pf_sw(pf);
4863 err_init_pf_sw:
4864 	ice_dealloc_vsis(pf);
4865 err_alloc_vsis:
4866 	ice_deinit_dev(pf);
4867 	return err;
4868 }
4869 
4870 static void ice_deinit(struct ice_pf *pf)
4871 {
4872 	set_bit(ICE_SERVICE_DIS, pf->state);
4873 	set_bit(ICE_DOWN, pf->state);
4874 
4875 	ice_deinit_pf_sw(pf);
4876 	ice_dealloc_vsis(pf);
4877 	ice_deinit_dev(pf);
4878 }
4879 
4880 /**
4881  * ice_load - load pf by init hw and starting VSI
4882  * @pf: pointer to the pf instance
4883  */
4884 int ice_load(struct ice_pf *pf)
4885 {
4886 	struct ice_vsi_cfg_params params = {};
4887 	struct ice_vsi *vsi;
4888 	int err;
4889 
4890 	err = ice_init_dev(pf);
4891 	if (err)
4892 		return err;
4893 
4894 	vsi = ice_get_main_vsi(pf);
4895 
4896 	params = ice_vsi_to_params(vsi);
4897 	params.flags = ICE_VSI_FLAG_INIT;
4898 
4899 	err = ice_vsi_cfg(vsi, &params);
4900 	if (err)
4901 		goto err_vsi_cfg;
4902 
4903 	err = ice_start_eth(ice_get_main_vsi(pf));
4904 	if (err)
4905 		goto err_start_eth;
4906 
4907 	err = ice_init_rdma(pf);
4908 	if (err)
4909 		goto err_init_rdma;
4910 
4911 	ice_init_features(pf);
4912 	ice_service_task_restart(pf);
4913 
4914 	clear_bit(ICE_DOWN, pf->state);
4915 
4916 	return 0;
4917 
4918 err_init_rdma:
4919 	ice_vsi_close(ice_get_main_vsi(pf));
4920 err_start_eth:
4921 	ice_vsi_decfg(ice_get_main_vsi(pf));
4922 err_vsi_cfg:
4923 	ice_deinit_dev(pf);
4924 	return err;
4925 }
4926 
4927 /**
4928  * ice_unload - unload pf by stopping VSI and deinit hw
4929  * @pf: pointer to the pf instance
4930  */
4931 void ice_unload(struct ice_pf *pf)
4932 {
4933 	ice_deinit_features(pf);
4934 	ice_deinit_rdma(pf);
4935 	ice_stop_eth(ice_get_main_vsi(pf));
4936 	ice_vsi_decfg(ice_get_main_vsi(pf));
4937 	ice_deinit_dev(pf);
4938 }
4939 
4940 /**
4941  * ice_probe - Device initialization routine
4942  * @pdev: PCI device information struct
4943  * @ent: entry in ice_pci_tbl
4944  *
4945  * Returns 0 on success, negative on failure
4946  */
4947 static int
4948 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4949 {
4950 	struct device *dev = &pdev->dev;
4951 	struct ice_pf *pf;
4952 	struct ice_hw *hw;
4953 	int err;
4954 
4955 	if (pdev->is_virtfn) {
4956 		dev_err(dev, "can't probe a virtual function\n");
4957 		return -EINVAL;
4958 	}
4959 
4960 	/* this driver uses devres, see
4961 	 * Documentation/driver-api/driver-model/devres.rst
4962 	 */
4963 	err = pcim_enable_device(pdev);
4964 	if (err)
4965 		return err;
4966 
4967 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4968 	if (err) {
4969 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4970 		return err;
4971 	}
4972 
4973 	pf = ice_allocate_pf(dev);
4974 	if (!pf)
4975 		return -ENOMEM;
4976 
4977 	/* initialize Auxiliary index to invalid value */
4978 	pf->aux_idx = -1;
4979 
4980 	/* set up for high or low DMA */
4981 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4982 	if (err) {
4983 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4984 		return err;
4985 	}
4986 
4987 	pci_set_master(pdev);
4988 
4989 	pf->pdev = pdev;
4990 	pci_set_drvdata(pdev, pf);
4991 	set_bit(ICE_DOWN, pf->state);
4992 	/* Disable service task until DOWN bit is cleared */
4993 	set_bit(ICE_SERVICE_DIS, pf->state);
4994 
4995 	hw = &pf->hw;
4996 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4997 	pci_save_state(pdev);
4998 
4999 	hw->back = pf;
5000 	hw->port_info = NULL;
5001 	hw->vendor_id = pdev->vendor;
5002 	hw->device_id = pdev->device;
5003 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5004 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5005 	hw->subsystem_device_id = pdev->subsystem_device;
5006 	hw->bus.device = PCI_SLOT(pdev->devfn);
5007 	hw->bus.func = PCI_FUNC(pdev->devfn);
5008 	ice_set_ctrlq_len(hw);
5009 
5010 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5011 
5012 #ifndef CONFIG_DYNAMIC_DEBUG
5013 	if (debug < -1)
5014 		hw->debug_mask = debug;
5015 #endif
5016 
5017 	err = ice_init(pf);
5018 	if (err)
5019 		goto err_init;
5020 
5021 	err = ice_init_eth(pf);
5022 	if (err)
5023 		goto err_init_eth;
5024 
5025 	err = ice_init_rdma(pf);
5026 	if (err)
5027 		goto err_init_rdma;
5028 
5029 	err = ice_init_devlink(pf);
5030 	if (err)
5031 		goto err_init_devlink;
5032 
5033 	ice_init_features(pf);
5034 
5035 	return 0;
5036 
5037 err_init_devlink:
5038 	ice_deinit_rdma(pf);
5039 err_init_rdma:
5040 	ice_deinit_eth(pf);
5041 err_init_eth:
5042 	ice_deinit(pf);
5043 err_init:
5044 	pci_disable_device(pdev);
5045 	return err;
5046 }
5047 
5048 /**
5049  * ice_set_wake - enable or disable Wake on LAN
5050  * @pf: pointer to the PF struct
5051  *
5052  * Simple helper for WoL control
5053  */
5054 static void ice_set_wake(struct ice_pf *pf)
5055 {
5056 	struct ice_hw *hw = &pf->hw;
5057 	bool wol = pf->wol_ena;
5058 
5059 	/* clear wake state, otherwise new wake events won't fire */
5060 	wr32(hw, PFPM_WUS, U32_MAX);
5061 
5062 	/* enable / disable APM wake up, no RMW needed */
5063 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5064 
5065 	/* set magic packet filter enabled */
5066 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5067 }
5068 
5069 /**
5070  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5071  * @pf: pointer to the PF struct
5072  *
5073  * Issue firmware command to enable multicast magic wake, making
5074  * sure that any locally administered address (LAA) is used for
5075  * wake, and that PF reset doesn't undo the LAA.
5076  */
5077 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5078 {
5079 	struct device *dev = ice_pf_to_dev(pf);
5080 	struct ice_hw *hw = &pf->hw;
5081 	u8 mac_addr[ETH_ALEN];
5082 	struct ice_vsi *vsi;
5083 	int status;
5084 	u8 flags;
5085 
5086 	if (!pf->wol_ena)
5087 		return;
5088 
5089 	vsi = ice_get_main_vsi(pf);
5090 	if (!vsi)
5091 		return;
5092 
5093 	/* Get current MAC address in case it's an LAA */
5094 	if (vsi->netdev)
5095 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5096 	else
5097 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5098 
5099 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5100 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5101 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5102 
5103 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5104 	if (status)
5105 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5106 			status, ice_aq_str(hw->adminq.sq_last_status));
5107 }
5108 
5109 /**
5110  * ice_remove - Device removal routine
5111  * @pdev: PCI device information struct
5112  */
5113 static void ice_remove(struct pci_dev *pdev)
5114 {
5115 	struct ice_pf *pf = pci_get_drvdata(pdev);
5116 	int i;
5117 
5118 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5119 		if (!ice_is_reset_in_progress(pf->state))
5120 			break;
5121 		msleep(100);
5122 	}
5123 
5124 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5125 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5126 		ice_free_vfs(pf);
5127 	}
5128 
5129 	ice_service_task_stop(pf);
5130 	ice_aq_cancel_waiting_tasks(pf);
5131 	set_bit(ICE_DOWN, pf->state);
5132 
5133 	if (!ice_is_safe_mode(pf))
5134 		ice_remove_arfs(pf);
5135 	ice_deinit_features(pf);
5136 	ice_deinit_devlink(pf);
5137 	ice_deinit_rdma(pf);
5138 	ice_deinit_eth(pf);
5139 	ice_deinit(pf);
5140 
5141 	ice_vsi_release_all(pf);
5142 
5143 	ice_setup_mc_magic_wake(pf);
5144 	ice_set_wake(pf);
5145 
5146 	pci_disable_device(pdev);
5147 }
5148 
5149 /**
5150  * ice_shutdown - PCI callback for shutting down device
5151  * @pdev: PCI device information struct
5152  */
5153 static void ice_shutdown(struct pci_dev *pdev)
5154 {
5155 	struct ice_pf *pf = pci_get_drvdata(pdev);
5156 
5157 	ice_remove(pdev);
5158 
5159 	if (system_state == SYSTEM_POWER_OFF) {
5160 		pci_wake_from_d3(pdev, pf->wol_ena);
5161 		pci_set_power_state(pdev, PCI_D3hot);
5162 	}
5163 }
5164 
5165 #ifdef CONFIG_PM
5166 /**
5167  * ice_prepare_for_shutdown - prep for PCI shutdown
5168  * @pf: board private structure
5169  *
5170  * Inform or close all dependent features in prep for PCI device shutdown
5171  */
5172 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5173 {
5174 	struct ice_hw *hw = &pf->hw;
5175 	u32 v;
5176 
5177 	/* Notify VFs of impending reset */
5178 	if (ice_check_sq_alive(hw, &hw->mailboxq))
5179 		ice_vc_notify_reset(pf);
5180 
5181 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5182 
5183 	/* disable the VSIs and their queues that are not already DOWN */
5184 	ice_pf_dis_all_vsi(pf, false);
5185 
5186 	ice_for_each_vsi(pf, v)
5187 		if (pf->vsi[v])
5188 			pf->vsi[v]->vsi_num = 0;
5189 
5190 	ice_shutdown_all_ctrlq(hw);
5191 }
5192 
5193 /**
5194  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5195  * @pf: board private structure to reinitialize
5196  *
5197  * This routine reinitialize interrupt scheme that was cleared during
5198  * power management suspend callback.
5199  *
5200  * This should be called during resume routine to re-allocate the q_vectors
5201  * and reacquire interrupts.
5202  */
5203 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5204 {
5205 	struct device *dev = ice_pf_to_dev(pf);
5206 	int ret, v;
5207 
5208 	/* Since we clear MSIX flag during suspend, we need to
5209 	 * set it back during resume...
5210 	 */
5211 
5212 	ret = ice_init_interrupt_scheme(pf);
5213 	if (ret) {
5214 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5215 		return ret;
5216 	}
5217 
5218 	/* Remap vectors and rings, after successful re-init interrupts */
5219 	ice_for_each_vsi(pf, v) {
5220 		if (!pf->vsi[v])
5221 			continue;
5222 
5223 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5224 		if (ret)
5225 			goto err_reinit;
5226 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5227 	}
5228 
5229 	ret = ice_req_irq_msix_misc(pf);
5230 	if (ret) {
5231 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5232 			ret);
5233 		goto err_reinit;
5234 	}
5235 
5236 	return 0;
5237 
5238 err_reinit:
5239 	while (v--)
5240 		if (pf->vsi[v])
5241 			ice_vsi_free_q_vectors(pf->vsi[v]);
5242 
5243 	return ret;
5244 }
5245 
5246 /**
5247  * ice_suspend
5248  * @dev: generic device information structure
5249  *
5250  * Power Management callback to quiesce the device and prepare
5251  * for D3 transition.
5252  */
5253 static int __maybe_unused ice_suspend(struct device *dev)
5254 {
5255 	struct pci_dev *pdev = to_pci_dev(dev);
5256 	struct ice_pf *pf;
5257 	int disabled, v;
5258 
5259 	pf = pci_get_drvdata(pdev);
5260 
5261 	if (!ice_pf_state_is_nominal(pf)) {
5262 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5263 		return -EBUSY;
5264 	}
5265 
5266 	/* Stop watchdog tasks until resume completion.
5267 	 * Even though it is most likely that the service task is
5268 	 * disabled if the device is suspended or down, the service task's
5269 	 * state is controlled by a different state bit, and we should
5270 	 * store and honor whatever state that bit is in at this point.
5271 	 */
5272 	disabled = ice_service_task_stop(pf);
5273 
5274 	ice_unplug_aux_dev(pf);
5275 
5276 	/* Already suspended?, then there is nothing to do */
5277 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5278 		if (!disabled)
5279 			ice_service_task_restart(pf);
5280 		return 0;
5281 	}
5282 
5283 	if (test_bit(ICE_DOWN, pf->state) ||
5284 	    ice_is_reset_in_progress(pf->state)) {
5285 		dev_err(dev, "can't suspend device in reset or already down\n");
5286 		if (!disabled)
5287 			ice_service_task_restart(pf);
5288 		return 0;
5289 	}
5290 
5291 	ice_setup_mc_magic_wake(pf);
5292 
5293 	ice_prepare_for_shutdown(pf);
5294 
5295 	ice_set_wake(pf);
5296 
5297 	/* Free vectors, clear the interrupt scheme and release IRQs
5298 	 * for proper hibernation, especially with large number of CPUs.
5299 	 * Otherwise hibernation might fail when mapping all the vectors back
5300 	 * to CPU0.
5301 	 */
5302 	ice_free_irq_msix_misc(pf);
5303 	ice_for_each_vsi(pf, v) {
5304 		if (!pf->vsi[v])
5305 			continue;
5306 		ice_vsi_free_q_vectors(pf->vsi[v]);
5307 	}
5308 	ice_clear_interrupt_scheme(pf);
5309 
5310 	pci_save_state(pdev);
5311 	pci_wake_from_d3(pdev, pf->wol_ena);
5312 	pci_set_power_state(pdev, PCI_D3hot);
5313 	return 0;
5314 }
5315 
5316 /**
5317  * ice_resume - PM callback for waking up from D3
5318  * @dev: generic device information structure
5319  */
5320 static int __maybe_unused ice_resume(struct device *dev)
5321 {
5322 	struct pci_dev *pdev = to_pci_dev(dev);
5323 	enum ice_reset_req reset_type;
5324 	struct ice_pf *pf;
5325 	struct ice_hw *hw;
5326 	int ret;
5327 
5328 	pci_set_power_state(pdev, PCI_D0);
5329 	pci_restore_state(pdev);
5330 	pci_save_state(pdev);
5331 
5332 	if (!pci_device_is_present(pdev))
5333 		return -ENODEV;
5334 
5335 	ret = pci_enable_device_mem(pdev);
5336 	if (ret) {
5337 		dev_err(dev, "Cannot enable device after suspend\n");
5338 		return ret;
5339 	}
5340 
5341 	pf = pci_get_drvdata(pdev);
5342 	hw = &pf->hw;
5343 
5344 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5345 	ice_print_wake_reason(pf);
5346 
5347 	/* We cleared the interrupt scheme when we suspended, so we need to
5348 	 * restore it now to resume device functionality.
5349 	 */
5350 	ret = ice_reinit_interrupt_scheme(pf);
5351 	if (ret)
5352 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5353 
5354 	clear_bit(ICE_DOWN, pf->state);
5355 	/* Now perform PF reset and rebuild */
5356 	reset_type = ICE_RESET_PFR;
5357 	/* re-enable service task for reset, but allow reset to schedule it */
5358 	clear_bit(ICE_SERVICE_DIS, pf->state);
5359 
5360 	if (ice_schedule_reset(pf, reset_type))
5361 		dev_err(dev, "Reset during resume failed.\n");
5362 
5363 	clear_bit(ICE_SUSPENDED, pf->state);
5364 	ice_service_task_restart(pf);
5365 
5366 	/* Restart the service task */
5367 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5368 
5369 	return 0;
5370 }
5371 #endif /* CONFIG_PM */
5372 
5373 /**
5374  * ice_pci_err_detected - warning that PCI error has been detected
5375  * @pdev: PCI device information struct
5376  * @err: the type of PCI error
5377  *
5378  * Called to warn that something happened on the PCI bus and the error handling
5379  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5380  */
5381 static pci_ers_result_t
5382 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5383 {
5384 	struct ice_pf *pf = pci_get_drvdata(pdev);
5385 
5386 	if (!pf) {
5387 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5388 			__func__, err);
5389 		return PCI_ERS_RESULT_DISCONNECT;
5390 	}
5391 
5392 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5393 		ice_service_task_stop(pf);
5394 
5395 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5396 			set_bit(ICE_PFR_REQ, pf->state);
5397 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5398 		}
5399 	}
5400 
5401 	return PCI_ERS_RESULT_NEED_RESET;
5402 }
5403 
5404 /**
5405  * ice_pci_err_slot_reset - a PCI slot reset has just happened
5406  * @pdev: PCI device information struct
5407  *
5408  * Called to determine if the driver can recover from the PCI slot reset by
5409  * using a register read to determine if the device is recoverable.
5410  */
5411 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5412 {
5413 	struct ice_pf *pf = pci_get_drvdata(pdev);
5414 	pci_ers_result_t result;
5415 	int err;
5416 	u32 reg;
5417 
5418 	err = pci_enable_device_mem(pdev);
5419 	if (err) {
5420 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5421 			err);
5422 		result = PCI_ERS_RESULT_DISCONNECT;
5423 	} else {
5424 		pci_set_master(pdev);
5425 		pci_restore_state(pdev);
5426 		pci_save_state(pdev);
5427 		pci_wake_from_d3(pdev, false);
5428 
5429 		/* Check for life */
5430 		reg = rd32(&pf->hw, GLGEN_RTRIG);
5431 		if (!reg)
5432 			result = PCI_ERS_RESULT_RECOVERED;
5433 		else
5434 			result = PCI_ERS_RESULT_DISCONNECT;
5435 	}
5436 
5437 	return result;
5438 }
5439 
5440 /**
5441  * ice_pci_err_resume - restart operations after PCI error recovery
5442  * @pdev: PCI device information struct
5443  *
5444  * Called to allow the driver to bring things back up after PCI error and/or
5445  * reset recovery have finished
5446  */
5447 static void ice_pci_err_resume(struct pci_dev *pdev)
5448 {
5449 	struct ice_pf *pf = pci_get_drvdata(pdev);
5450 
5451 	if (!pf) {
5452 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5453 			__func__);
5454 		return;
5455 	}
5456 
5457 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5458 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5459 			__func__);
5460 		return;
5461 	}
5462 
5463 	ice_restore_all_vfs_msi_state(pdev);
5464 
5465 	ice_do_reset(pf, ICE_RESET_PFR);
5466 	ice_service_task_restart(pf);
5467 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5468 }
5469 
5470 /**
5471  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5472  * @pdev: PCI device information struct
5473  */
5474 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5475 {
5476 	struct ice_pf *pf = pci_get_drvdata(pdev);
5477 
5478 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5479 		ice_service_task_stop(pf);
5480 
5481 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5482 			set_bit(ICE_PFR_REQ, pf->state);
5483 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5484 		}
5485 	}
5486 }
5487 
5488 /**
5489  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5490  * @pdev: PCI device information struct
5491  */
5492 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5493 {
5494 	ice_pci_err_resume(pdev);
5495 }
5496 
5497 /* ice_pci_tbl - PCI Device ID Table
5498  *
5499  * Wildcard entries (PCI_ANY_ID) should come last
5500  * Last entry must be all 0s
5501  *
5502  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5503  *   Class, Class Mask, private data (not used) }
5504  */
5505 static const struct pci_device_id ice_pci_tbl[] = {
5506 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5507 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5508 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5509 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5510 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5511 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5512 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5513 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5514 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5515 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5516 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5517 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5518 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5519 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5520 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5521 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5522 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5523 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5524 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5525 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5526 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5527 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5528 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5529 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5530 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5531 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5532 	/* required last entry */
5533 	{ 0, }
5534 };
5535 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5536 
5537 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5538 
5539 static const struct pci_error_handlers ice_pci_err_handler = {
5540 	.error_detected = ice_pci_err_detected,
5541 	.slot_reset = ice_pci_err_slot_reset,
5542 	.reset_prepare = ice_pci_err_reset_prepare,
5543 	.reset_done = ice_pci_err_reset_done,
5544 	.resume = ice_pci_err_resume
5545 };
5546 
5547 static struct pci_driver ice_driver = {
5548 	.name = KBUILD_MODNAME,
5549 	.id_table = ice_pci_tbl,
5550 	.probe = ice_probe,
5551 	.remove = ice_remove,
5552 #ifdef CONFIG_PM
5553 	.driver.pm = &ice_pm_ops,
5554 #endif /* CONFIG_PM */
5555 	.shutdown = ice_shutdown,
5556 	.sriov_configure = ice_sriov_configure,
5557 	.err_handler = &ice_pci_err_handler
5558 };
5559 
5560 /**
5561  * ice_module_init - Driver registration routine
5562  *
5563  * ice_module_init is the first routine called when the driver is
5564  * loaded. All it does is register with the PCI subsystem.
5565  */
5566 static int __init ice_module_init(void)
5567 {
5568 	int status;
5569 
5570 	pr_info("%s\n", ice_driver_string);
5571 	pr_info("%s\n", ice_copyright);
5572 
5573 	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5574 	if (!ice_wq) {
5575 		pr_err("Failed to create workqueue\n");
5576 		return -ENOMEM;
5577 	}
5578 
5579 	status = pci_register_driver(&ice_driver);
5580 	if (status) {
5581 		pr_err("failed to register PCI driver, err %d\n", status);
5582 		destroy_workqueue(ice_wq);
5583 	}
5584 
5585 	return status;
5586 }
5587 module_init(ice_module_init);
5588 
5589 /**
5590  * ice_module_exit - Driver exit cleanup routine
5591  *
5592  * ice_module_exit is called just before the driver is removed
5593  * from memory.
5594  */
5595 static void __exit ice_module_exit(void)
5596 {
5597 	pci_unregister_driver(&ice_driver);
5598 	destroy_workqueue(ice_wq);
5599 	pr_info("module unloaded\n");
5600 }
5601 module_exit(ice_module_exit);
5602 
5603 /**
5604  * ice_set_mac_address - NDO callback to set MAC address
5605  * @netdev: network interface device structure
5606  * @pi: pointer to an address structure
5607  *
5608  * Returns 0 on success, negative on failure
5609  */
5610 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5611 {
5612 	struct ice_netdev_priv *np = netdev_priv(netdev);
5613 	struct ice_vsi *vsi = np->vsi;
5614 	struct ice_pf *pf = vsi->back;
5615 	struct ice_hw *hw = &pf->hw;
5616 	struct sockaddr *addr = pi;
5617 	u8 old_mac[ETH_ALEN];
5618 	u8 flags = 0;
5619 	u8 *mac;
5620 	int err;
5621 
5622 	mac = (u8 *)addr->sa_data;
5623 
5624 	if (!is_valid_ether_addr(mac))
5625 		return -EADDRNOTAVAIL;
5626 
5627 	if (test_bit(ICE_DOWN, pf->state) ||
5628 	    ice_is_reset_in_progress(pf->state)) {
5629 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5630 			   mac);
5631 		return -EBUSY;
5632 	}
5633 
5634 	if (ice_chnl_dmac_fltr_cnt(pf)) {
5635 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5636 			   mac);
5637 		return -EAGAIN;
5638 	}
5639 
5640 	netif_addr_lock_bh(netdev);
5641 	ether_addr_copy(old_mac, netdev->dev_addr);
5642 	/* change the netdev's MAC address */
5643 	eth_hw_addr_set(netdev, mac);
5644 	netif_addr_unlock_bh(netdev);
5645 
5646 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5647 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5648 	if (err && err != -ENOENT) {
5649 		err = -EADDRNOTAVAIL;
5650 		goto err_update_filters;
5651 	}
5652 
5653 	/* Add filter for new MAC. If filter exists, return success */
5654 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5655 	if (err == -EEXIST) {
5656 		/* Although this MAC filter is already present in hardware it's
5657 		 * possible in some cases (e.g. bonding) that dev_addr was
5658 		 * modified outside of the driver and needs to be restored back
5659 		 * to this value.
5660 		 */
5661 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5662 
5663 		return 0;
5664 	} else if (err) {
5665 		/* error if the new filter addition failed */
5666 		err = -EADDRNOTAVAIL;
5667 	}
5668 
5669 err_update_filters:
5670 	if (err) {
5671 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5672 			   mac);
5673 		netif_addr_lock_bh(netdev);
5674 		eth_hw_addr_set(netdev, old_mac);
5675 		netif_addr_unlock_bh(netdev);
5676 		return err;
5677 	}
5678 
5679 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5680 		   netdev->dev_addr);
5681 
5682 	/* write new MAC address to the firmware */
5683 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5684 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5685 	if (err) {
5686 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5687 			   mac, err);
5688 	}
5689 	return 0;
5690 }
5691 
5692 /**
5693  * ice_set_rx_mode - NDO callback to set the netdev filters
5694  * @netdev: network interface device structure
5695  */
5696 static void ice_set_rx_mode(struct net_device *netdev)
5697 {
5698 	struct ice_netdev_priv *np = netdev_priv(netdev);
5699 	struct ice_vsi *vsi = np->vsi;
5700 
5701 	if (!vsi)
5702 		return;
5703 
5704 	/* Set the flags to synchronize filters
5705 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5706 	 * flags
5707 	 */
5708 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5709 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5710 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5711 
5712 	/* schedule our worker thread which will take care of
5713 	 * applying the new filter changes
5714 	 */
5715 	ice_service_task_schedule(vsi->back);
5716 }
5717 
5718 /**
5719  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5720  * @netdev: network interface device structure
5721  * @queue_index: Queue ID
5722  * @maxrate: maximum bandwidth in Mbps
5723  */
5724 static int
5725 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5726 {
5727 	struct ice_netdev_priv *np = netdev_priv(netdev);
5728 	struct ice_vsi *vsi = np->vsi;
5729 	u16 q_handle;
5730 	int status;
5731 	u8 tc;
5732 
5733 	/* Validate maxrate requested is within permitted range */
5734 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5735 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5736 			   maxrate, queue_index);
5737 		return -EINVAL;
5738 	}
5739 
5740 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5741 	tc = ice_dcb_get_tc(vsi, queue_index);
5742 
5743 	/* Set BW back to default, when user set maxrate to 0 */
5744 	if (!maxrate)
5745 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5746 					       q_handle, ICE_MAX_BW);
5747 	else
5748 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5749 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5750 	if (status)
5751 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5752 			   status);
5753 
5754 	return status;
5755 }
5756 
5757 /**
5758  * ice_fdb_add - add an entry to the hardware database
5759  * @ndm: the input from the stack
5760  * @tb: pointer to array of nladdr (unused)
5761  * @dev: the net device pointer
5762  * @addr: the MAC address entry being added
5763  * @vid: VLAN ID
5764  * @flags: instructions from stack about fdb operation
5765  * @extack: netlink extended ack
5766  */
5767 static int
5768 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5769 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5770 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5771 {
5772 	int err;
5773 
5774 	if (vid) {
5775 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5776 		return -EINVAL;
5777 	}
5778 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5779 		netdev_err(dev, "FDB only supports static addresses\n");
5780 		return -EINVAL;
5781 	}
5782 
5783 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5784 		err = dev_uc_add_excl(dev, addr);
5785 	else if (is_multicast_ether_addr(addr))
5786 		err = dev_mc_add_excl(dev, addr);
5787 	else
5788 		err = -EINVAL;
5789 
5790 	/* Only return duplicate errors if NLM_F_EXCL is set */
5791 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5792 		err = 0;
5793 
5794 	return err;
5795 }
5796 
5797 /**
5798  * ice_fdb_del - delete an entry from the hardware database
5799  * @ndm: the input from the stack
5800  * @tb: pointer to array of nladdr (unused)
5801  * @dev: the net device pointer
5802  * @addr: the MAC address entry being added
5803  * @vid: VLAN ID
5804  * @extack: netlink extended ack
5805  */
5806 static int
5807 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5808 	    struct net_device *dev, const unsigned char *addr,
5809 	    __always_unused u16 vid, struct netlink_ext_ack *extack)
5810 {
5811 	int err;
5812 
5813 	if (ndm->ndm_state & NUD_PERMANENT) {
5814 		netdev_err(dev, "FDB only supports static addresses\n");
5815 		return -EINVAL;
5816 	}
5817 
5818 	if (is_unicast_ether_addr(addr))
5819 		err = dev_uc_del(dev, addr);
5820 	else if (is_multicast_ether_addr(addr))
5821 		err = dev_mc_del(dev, addr);
5822 	else
5823 		err = -EINVAL;
5824 
5825 	return err;
5826 }
5827 
5828 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
5829 					 NETIF_F_HW_VLAN_CTAG_TX | \
5830 					 NETIF_F_HW_VLAN_STAG_RX | \
5831 					 NETIF_F_HW_VLAN_STAG_TX)
5832 
5833 #define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
5834 					 NETIF_F_HW_VLAN_STAG_RX)
5835 
5836 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
5837 					 NETIF_F_HW_VLAN_STAG_FILTER)
5838 
5839 /**
5840  * ice_fix_features - fix the netdev features flags based on device limitations
5841  * @netdev: ptr to the netdev that flags are being fixed on
5842  * @features: features that need to be checked and possibly fixed
5843  *
5844  * Make sure any fixups are made to features in this callback. This enables the
5845  * driver to not have to check unsupported configurations throughout the driver
5846  * because that's the responsiblity of this callback.
5847  *
5848  * Single VLAN Mode (SVM) Supported Features:
5849  *	NETIF_F_HW_VLAN_CTAG_FILTER
5850  *	NETIF_F_HW_VLAN_CTAG_RX
5851  *	NETIF_F_HW_VLAN_CTAG_TX
5852  *
5853  * Double VLAN Mode (DVM) Supported Features:
5854  *	NETIF_F_HW_VLAN_CTAG_FILTER
5855  *	NETIF_F_HW_VLAN_CTAG_RX
5856  *	NETIF_F_HW_VLAN_CTAG_TX
5857  *
5858  *	NETIF_F_HW_VLAN_STAG_FILTER
5859  *	NETIF_HW_VLAN_STAG_RX
5860  *	NETIF_HW_VLAN_STAG_TX
5861  *
5862  * Features that need fixing:
5863  *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5864  *	These are mutually exlusive as the VSI context cannot support multiple
5865  *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
5866  *	is not done, then default to clearing the requested STAG offload
5867  *	settings.
5868  *
5869  *	All supported filtering has to be enabled or disabled together. For
5870  *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5871  *	together. If this is not done, then default to VLAN filtering disabled.
5872  *	These are mutually exclusive as there is currently no way to
5873  *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5874  *	prune rules.
5875  */
5876 static netdev_features_t
5877 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5878 {
5879 	struct ice_netdev_priv *np = netdev_priv(netdev);
5880 	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5881 	bool cur_ctag, cur_stag, req_ctag, req_stag;
5882 
5883 	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5884 	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5885 	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5886 
5887 	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5888 	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5889 	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5890 
5891 	if (req_vlan_fltr != cur_vlan_fltr) {
5892 		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5893 			if (req_ctag && req_stag) {
5894 				features |= NETIF_VLAN_FILTERING_FEATURES;
5895 			} else if (!req_ctag && !req_stag) {
5896 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
5897 			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
5898 				   (!cur_stag && req_stag && !cur_ctag)) {
5899 				features |= NETIF_VLAN_FILTERING_FEATURES;
5900 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5901 			} else if ((cur_ctag && !req_ctag && cur_stag) ||
5902 				   (cur_stag && !req_stag && cur_ctag)) {
5903 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
5904 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
5905 			}
5906 		} else {
5907 			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
5908 				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
5909 
5910 			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
5911 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5912 		}
5913 	}
5914 
5915 	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
5916 	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
5917 		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
5918 		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
5919 			      NETIF_F_HW_VLAN_STAG_TX);
5920 	}
5921 
5922 	if (!(netdev->features & NETIF_F_RXFCS) &&
5923 	    (features & NETIF_F_RXFCS) &&
5924 	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
5925 	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
5926 		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
5927 		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
5928 	}
5929 
5930 	return features;
5931 }
5932 
5933 /**
5934  * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
5935  * @vsi: PF's VSI
5936  * @features: features used to determine VLAN offload settings
5937  *
5938  * First, determine the vlan_ethertype based on the VLAN offload bits in
5939  * features. Then determine if stripping and insertion should be enabled or
5940  * disabled. Finally enable or disable VLAN stripping and insertion.
5941  */
5942 static int
5943 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
5944 {
5945 	bool enable_stripping = true, enable_insertion = true;
5946 	struct ice_vsi_vlan_ops *vlan_ops;
5947 	int strip_err = 0, insert_err = 0;
5948 	u16 vlan_ethertype = 0;
5949 
5950 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5951 
5952 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
5953 		vlan_ethertype = ETH_P_8021AD;
5954 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
5955 		vlan_ethertype = ETH_P_8021Q;
5956 
5957 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
5958 		enable_stripping = false;
5959 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
5960 		enable_insertion = false;
5961 
5962 	if (enable_stripping)
5963 		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
5964 	else
5965 		strip_err = vlan_ops->dis_stripping(vsi);
5966 
5967 	if (enable_insertion)
5968 		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
5969 	else
5970 		insert_err = vlan_ops->dis_insertion(vsi);
5971 
5972 	if (strip_err || insert_err)
5973 		return -EIO;
5974 
5975 	return 0;
5976 }
5977 
5978 /**
5979  * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
5980  * @vsi: PF's VSI
5981  * @features: features used to determine VLAN filtering settings
5982  *
5983  * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
5984  * features.
5985  */
5986 static int
5987 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
5988 {
5989 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5990 	int err = 0;
5991 
5992 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
5993 	 * if either bit is set
5994 	 */
5995 	if (features &
5996 	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
5997 		err = vlan_ops->ena_rx_filtering(vsi);
5998 	else
5999 		err = vlan_ops->dis_rx_filtering(vsi);
6000 
6001 	return err;
6002 }
6003 
6004 /**
6005  * ice_set_vlan_features - set VLAN settings based on suggested feature set
6006  * @netdev: ptr to the netdev being adjusted
6007  * @features: the feature set that the stack is suggesting
6008  *
6009  * Only update VLAN settings if the requested_vlan_features are different than
6010  * the current_vlan_features.
6011  */
6012 static int
6013 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6014 {
6015 	netdev_features_t current_vlan_features, requested_vlan_features;
6016 	struct ice_netdev_priv *np = netdev_priv(netdev);
6017 	struct ice_vsi *vsi = np->vsi;
6018 	int err;
6019 
6020 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6021 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6022 	if (current_vlan_features ^ requested_vlan_features) {
6023 		if ((features & NETIF_F_RXFCS) &&
6024 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6025 			dev_err(ice_pf_to_dev(vsi->back),
6026 				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6027 			return -EIO;
6028 		}
6029 
6030 		err = ice_set_vlan_offload_features(vsi, features);
6031 		if (err)
6032 			return err;
6033 	}
6034 
6035 	current_vlan_features = netdev->features &
6036 		NETIF_VLAN_FILTERING_FEATURES;
6037 	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6038 	if (current_vlan_features ^ requested_vlan_features) {
6039 		err = ice_set_vlan_filtering_features(vsi, features);
6040 		if (err)
6041 			return err;
6042 	}
6043 
6044 	return 0;
6045 }
6046 
6047 /**
6048  * ice_set_loopback - turn on/off loopback mode on underlying PF
6049  * @vsi: ptr to VSI
6050  * @ena: flag to indicate the on/off setting
6051  */
6052 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6053 {
6054 	bool if_running = netif_running(vsi->netdev);
6055 	int ret;
6056 
6057 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6058 		ret = ice_down(vsi);
6059 		if (ret) {
6060 			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6061 			return ret;
6062 		}
6063 	}
6064 	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6065 	if (ret)
6066 		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6067 	if (if_running)
6068 		ret = ice_up(vsi);
6069 
6070 	return ret;
6071 }
6072 
6073 /**
6074  * ice_set_features - set the netdev feature flags
6075  * @netdev: ptr to the netdev being adjusted
6076  * @features: the feature set that the stack is suggesting
6077  */
6078 static int
6079 ice_set_features(struct net_device *netdev, netdev_features_t features)
6080 {
6081 	netdev_features_t changed = netdev->features ^ features;
6082 	struct ice_netdev_priv *np = netdev_priv(netdev);
6083 	struct ice_vsi *vsi = np->vsi;
6084 	struct ice_pf *pf = vsi->back;
6085 	int ret = 0;
6086 
6087 	/* Don't set any netdev advanced features with device in Safe Mode */
6088 	if (ice_is_safe_mode(pf)) {
6089 		dev_err(ice_pf_to_dev(pf),
6090 			"Device is in Safe Mode - not enabling advanced netdev features\n");
6091 		return ret;
6092 	}
6093 
6094 	/* Do not change setting during reset */
6095 	if (ice_is_reset_in_progress(pf->state)) {
6096 		dev_err(ice_pf_to_dev(pf),
6097 			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6098 		return -EBUSY;
6099 	}
6100 
6101 	/* Multiple features can be changed in one call so keep features in
6102 	 * separate if/else statements to guarantee each feature is checked
6103 	 */
6104 	if (changed & NETIF_F_RXHASH)
6105 		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6106 
6107 	ret = ice_set_vlan_features(netdev, features);
6108 	if (ret)
6109 		return ret;
6110 
6111 	/* Turn on receive of FCS aka CRC, and after setting this
6112 	 * flag the packet data will have the 4 byte CRC appended
6113 	 */
6114 	if (changed & NETIF_F_RXFCS) {
6115 		if ((features & NETIF_F_RXFCS) &&
6116 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6117 			dev_err(ice_pf_to_dev(vsi->back),
6118 				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6119 			return -EIO;
6120 		}
6121 
6122 		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6123 		ret = ice_down_up(vsi);
6124 		if (ret)
6125 			return ret;
6126 	}
6127 
6128 	if (changed & NETIF_F_NTUPLE) {
6129 		bool ena = !!(features & NETIF_F_NTUPLE);
6130 
6131 		ice_vsi_manage_fdir(vsi, ena);
6132 		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6133 	}
6134 
6135 	/* don't turn off hw_tc_offload when ADQ is already enabled */
6136 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6137 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6138 		return -EACCES;
6139 	}
6140 
6141 	if (changed & NETIF_F_HW_TC) {
6142 		bool ena = !!(features & NETIF_F_HW_TC);
6143 
6144 		ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6145 		      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6146 	}
6147 
6148 	if (changed & NETIF_F_LOOPBACK)
6149 		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6150 
6151 	return ret;
6152 }
6153 
6154 /**
6155  * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6156  * @vsi: VSI to setup VLAN properties for
6157  */
6158 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6159 {
6160 	int err;
6161 
6162 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6163 	if (err)
6164 		return err;
6165 
6166 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6167 	if (err)
6168 		return err;
6169 
6170 	return ice_vsi_add_vlan_zero(vsi);
6171 }
6172 
6173 /**
6174  * ice_vsi_cfg_lan - Setup the VSI lan related config
6175  * @vsi: the VSI being configured
6176  *
6177  * Return 0 on success and negative value on error
6178  */
6179 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6180 {
6181 	int err;
6182 
6183 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6184 		ice_set_rx_mode(vsi->netdev);
6185 
6186 		err = ice_vsi_vlan_setup(vsi);
6187 		if (err)
6188 			return err;
6189 	}
6190 	ice_vsi_cfg_dcb_rings(vsi);
6191 
6192 	err = ice_vsi_cfg_lan_txqs(vsi);
6193 	if (!err && ice_is_xdp_ena_vsi(vsi))
6194 		err = ice_vsi_cfg_xdp_txqs(vsi);
6195 	if (!err)
6196 		err = ice_vsi_cfg_rxqs(vsi);
6197 
6198 	return err;
6199 }
6200 
6201 /* THEORY OF MODERATION:
6202  * The ice driver hardware works differently than the hardware that DIMLIB was
6203  * originally made for. ice hardware doesn't have packet count limits that
6204  * can trigger an interrupt, but it *does* have interrupt rate limit support,
6205  * which is hard-coded to a limit of 250,000 ints/second.
6206  * If not using dynamic moderation, the INTRL value can be modified
6207  * by ethtool rx-usecs-high.
6208  */
6209 struct ice_dim {
6210 	/* the throttle rate for interrupts, basically worst case delay before
6211 	 * an initial interrupt fires, value is stored in microseconds.
6212 	 */
6213 	u16 itr;
6214 };
6215 
6216 /* Make a different profile for Rx that doesn't allow quite so aggressive
6217  * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6218  * second.
6219  */
6220 static const struct ice_dim rx_profile[] = {
6221 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6222 	{8},    /* 125,000 ints/s */
6223 	{16},   /*  62,500 ints/s */
6224 	{62},   /*  16,129 ints/s */
6225 	{126}   /*   7,936 ints/s */
6226 };
6227 
6228 /* The transmit profile, which has the same sorts of values
6229  * as the previous struct
6230  */
6231 static const struct ice_dim tx_profile[] = {
6232 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6233 	{8},    /* 125,000 ints/s */
6234 	{40},   /*  16,125 ints/s */
6235 	{128},  /*   7,812 ints/s */
6236 	{256}   /*   3,906 ints/s */
6237 };
6238 
6239 static void ice_tx_dim_work(struct work_struct *work)
6240 {
6241 	struct ice_ring_container *rc;
6242 	struct dim *dim;
6243 	u16 itr;
6244 
6245 	dim = container_of(work, struct dim, work);
6246 	rc = (struct ice_ring_container *)dim->priv;
6247 
6248 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6249 
6250 	/* look up the values in our local table */
6251 	itr = tx_profile[dim->profile_ix].itr;
6252 
6253 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6254 	ice_write_itr(rc, itr);
6255 
6256 	dim->state = DIM_START_MEASURE;
6257 }
6258 
6259 static void ice_rx_dim_work(struct work_struct *work)
6260 {
6261 	struct ice_ring_container *rc;
6262 	struct dim *dim;
6263 	u16 itr;
6264 
6265 	dim = container_of(work, struct dim, work);
6266 	rc = (struct ice_ring_container *)dim->priv;
6267 
6268 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6269 
6270 	/* look up the values in our local table */
6271 	itr = rx_profile[dim->profile_ix].itr;
6272 
6273 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6274 	ice_write_itr(rc, itr);
6275 
6276 	dim->state = DIM_START_MEASURE;
6277 }
6278 
6279 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6280 
6281 /**
6282  * ice_init_moderation - set up interrupt moderation
6283  * @q_vector: the vector containing rings to be configured
6284  *
6285  * Set up interrupt moderation registers, with the intent to do the right thing
6286  * when called from reset or from probe, and whether or not dynamic moderation
6287  * is enabled or not. Take special care to write all the registers in both
6288  * dynamic moderation mode or not in order to make sure hardware is in a known
6289  * state.
6290  */
6291 static void ice_init_moderation(struct ice_q_vector *q_vector)
6292 {
6293 	struct ice_ring_container *rc;
6294 	bool tx_dynamic, rx_dynamic;
6295 
6296 	rc = &q_vector->tx;
6297 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6298 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6299 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6300 	rc->dim.priv = rc;
6301 	tx_dynamic = ITR_IS_DYNAMIC(rc);
6302 
6303 	/* set the initial TX ITR to match the above */
6304 	ice_write_itr(rc, tx_dynamic ?
6305 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6306 
6307 	rc = &q_vector->rx;
6308 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6309 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6310 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6311 	rc->dim.priv = rc;
6312 	rx_dynamic = ITR_IS_DYNAMIC(rc);
6313 
6314 	/* set the initial RX ITR to match the above */
6315 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6316 				       rc->itr_setting);
6317 
6318 	ice_set_q_vector_intrl(q_vector);
6319 }
6320 
6321 /**
6322  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6323  * @vsi: the VSI being configured
6324  */
6325 static void ice_napi_enable_all(struct ice_vsi *vsi)
6326 {
6327 	int q_idx;
6328 
6329 	if (!vsi->netdev)
6330 		return;
6331 
6332 	ice_for_each_q_vector(vsi, q_idx) {
6333 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6334 
6335 		ice_init_moderation(q_vector);
6336 
6337 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6338 			napi_enable(&q_vector->napi);
6339 	}
6340 }
6341 
6342 /**
6343  * ice_up_complete - Finish the last steps of bringing up a connection
6344  * @vsi: The VSI being configured
6345  *
6346  * Return 0 on success and negative value on error
6347  */
6348 static int ice_up_complete(struct ice_vsi *vsi)
6349 {
6350 	struct ice_pf *pf = vsi->back;
6351 	int err;
6352 
6353 	ice_vsi_cfg_msix(vsi);
6354 
6355 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
6356 	 * Tx queue group list was configured and the context bits were
6357 	 * programmed using ice_vsi_cfg_txqs
6358 	 */
6359 	err = ice_vsi_start_all_rx_rings(vsi);
6360 	if (err)
6361 		return err;
6362 
6363 	clear_bit(ICE_VSI_DOWN, vsi->state);
6364 	ice_napi_enable_all(vsi);
6365 	ice_vsi_ena_irq(vsi);
6366 
6367 	if (vsi->port_info &&
6368 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6369 	    vsi->netdev && vsi->type == ICE_VSI_PF) {
6370 		ice_print_link_msg(vsi, true);
6371 		netif_tx_start_all_queues(vsi->netdev);
6372 		netif_carrier_on(vsi->netdev);
6373 		ice_ptp_link_change(pf, pf->hw.pf_id, true);
6374 	}
6375 
6376 	/* Perform an initial read of the statistics registers now to
6377 	 * set the baseline so counters are ready when interface is up
6378 	 */
6379 	ice_update_eth_stats(vsi);
6380 
6381 	if (vsi->type == ICE_VSI_PF)
6382 		ice_service_task_schedule(pf);
6383 
6384 	return 0;
6385 }
6386 
6387 /**
6388  * ice_up - Bring the connection back up after being down
6389  * @vsi: VSI being configured
6390  */
6391 int ice_up(struct ice_vsi *vsi)
6392 {
6393 	int err;
6394 
6395 	err = ice_vsi_cfg_lan(vsi);
6396 	if (!err)
6397 		err = ice_up_complete(vsi);
6398 
6399 	return err;
6400 }
6401 
6402 /**
6403  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6404  * @syncp: pointer to u64_stats_sync
6405  * @stats: stats that pkts and bytes count will be taken from
6406  * @pkts: packets stats counter
6407  * @bytes: bytes stats counter
6408  *
6409  * This function fetches stats from the ring considering the atomic operations
6410  * that needs to be performed to read u64 values in 32 bit machine.
6411  */
6412 void
6413 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6414 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6415 {
6416 	unsigned int start;
6417 
6418 	do {
6419 		start = u64_stats_fetch_begin(syncp);
6420 		*pkts = stats.pkts;
6421 		*bytes = stats.bytes;
6422 	} while (u64_stats_fetch_retry(syncp, start));
6423 }
6424 
6425 /**
6426  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6427  * @vsi: the VSI to be updated
6428  * @vsi_stats: the stats struct to be updated
6429  * @rings: rings to work on
6430  * @count: number of rings
6431  */
6432 static void
6433 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6434 			     struct rtnl_link_stats64 *vsi_stats,
6435 			     struct ice_tx_ring **rings, u16 count)
6436 {
6437 	u16 i;
6438 
6439 	for (i = 0; i < count; i++) {
6440 		struct ice_tx_ring *ring;
6441 		u64 pkts = 0, bytes = 0;
6442 
6443 		ring = READ_ONCE(rings[i]);
6444 		if (!ring || !ring->ring_stats)
6445 			continue;
6446 		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6447 					     ring->ring_stats->stats, &pkts,
6448 					     &bytes);
6449 		vsi_stats->tx_packets += pkts;
6450 		vsi_stats->tx_bytes += bytes;
6451 		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6452 		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6453 		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6454 	}
6455 }
6456 
6457 /**
6458  * ice_update_vsi_ring_stats - Update VSI stats counters
6459  * @vsi: the VSI to be updated
6460  */
6461 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6462 {
6463 	struct rtnl_link_stats64 *net_stats, *stats_prev;
6464 	struct rtnl_link_stats64 *vsi_stats;
6465 	u64 pkts, bytes;
6466 	int i;
6467 
6468 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6469 	if (!vsi_stats)
6470 		return;
6471 
6472 	/* reset non-netdev (extended) stats */
6473 	vsi->tx_restart = 0;
6474 	vsi->tx_busy = 0;
6475 	vsi->tx_linearize = 0;
6476 	vsi->rx_buf_failed = 0;
6477 	vsi->rx_page_failed = 0;
6478 
6479 	rcu_read_lock();
6480 
6481 	/* update Tx rings counters */
6482 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6483 				     vsi->num_txq);
6484 
6485 	/* update Rx rings counters */
6486 	ice_for_each_rxq(vsi, i) {
6487 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6488 		struct ice_ring_stats *ring_stats;
6489 
6490 		ring_stats = ring->ring_stats;
6491 		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6492 					     ring_stats->stats, &pkts,
6493 					     &bytes);
6494 		vsi_stats->rx_packets += pkts;
6495 		vsi_stats->rx_bytes += bytes;
6496 		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6497 		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6498 	}
6499 
6500 	/* update XDP Tx rings counters */
6501 	if (ice_is_xdp_ena_vsi(vsi))
6502 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6503 					     vsi->num_xdp_txq);
6504 
6505 	rcu_read_unlock();
6506 
6507 	net_stats = &vsi->net_stats;
6508 	stats_prev = &vsi->net_stats_prev;
6509 
6510 	/* clear prev counters after reset */
6511 	if (vsi_stats->tx_packets < stats_prev->tx_packets ||
6512 	    vsi_stats->rx_packets < stats_prev->rx_packets) {
6513 		stats_prev->tx_packets = 0;
6514 		stats_prev->tx_bytes = 0;
6515 		stats_prev->rx_packets = 0;
6516 		stats_prev->rx_bytes = 0;
6517 	}
6518 
6519 	/* update netdev counters */
6520 	net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6521 	net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6522 	net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6523 	net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6524 
6525 	stats_prev->tx_packets = vsi_stats->tx_packets;
6526 	stats_prev->tx_bytes = vsi_stats->tx_bytes;
6527 	stats_prev->rx_packets = vsi_stats->rx_packets;
6528 	stats_prev->rx_bytes = vsi_stats->rx_bytes;
6529 
6530 	kfree(vsi_stats);
6531 }
6532 
6533 /**
6534  * ice_update_vsi_stats - Update VSI stats counters
6535  * @vsi: the VSI to be updated
6536  */
6537 void ice_update_vsi_stats(struct ice_vsi *vsi)
6538 {
6539 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6540 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6541 	struct ice_pf *pf = vsi->back;
6542 
6543 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6544 	    test_bit(ICE_CFG_BUSY, pf->state))
6545 		return;
6546 
6547 	/* get stats as recorded by Tx/Rx rings */
6548 	ice_update_vsi_ring_stats(vsi);
6549 
6550 	/* get VSI stats as recorded by the hardware */
6551 	ice_update_eth_stats(vsi);
6552 
6553 	cur_ns->tx_errors = cur_es->tx_errors;
6554 	cur_ns->rx_dropped = cur_es->rx_discards;
6555 	cur_ns->tx_dropped = cur_es->tx_discards;
6556 	cur_ns->multicast = cur_es->rx_multicast;
6557 
6558 	/* update some more netdev stats if this is main VSI */
6559 	if (vsi->type == ICE_VSI_PF) {
6560 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6561 		cur_ns->rx_errors = pf->stats.crc_errors +
6562 				    pf->stats.illegal_bytes +
6563 				    pf->stats.rx_len_errors +
6564 				    pf->stats.rx_undersize +
6565 				    pf->hw_csum_rx_error +
6566 				    pf->stats.rx_jabber +
6567 				    pf->stats.rx_fragments +
6568 				    pf->stats.rx_oversize;
6569 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6570 		/* record drops from the port level */
6571 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6572 	}
6573 }
6574 
6575 /**
6576  * ice_update_pf_stats - Update PF port stats counters
6577  * @pf: PF whose stats needs to be updated
6578  */
6579 void ice_update_pf_stats(struct ice_pf *pf)
6580 {
6581 	struct ice_hw_port_stats *prev_ps, *cur_ps;
6582 	struct ice_hw *hw = &pf->hw;
6583 	u16 fd_ctr_base;
6584 	u8 port;
6585 
6586 	port = hw->port_info->lport;
6587 	prev_ps = &pf->stats_prev;
6588 	cur_ps = &pf->stats;
6589 
6590 	if (ice_is_reset_in_progress(pf->state))
6591 		pf->stat_prev_loaded = false;
6592 
6593 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6594 			  &prev_ps->eth.rx_bytes,
6595 			  &cur_ps->eth.rx_bytes);
6596 
6597 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6598 			  &prev_ps->eth.rx_unicast,
6599 			  &cur_ps->eth.rx_unicast);
6600 
6601 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6602 			  &prev_ps->eth.rx_multicast,
6603 			  &cur_ps->eth.rx_multicast);
6604 
6605 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6606 			  &prev_ps->eth.rx_broadcast,
6607 			  &cur_ps->eth.rx_broadcast);
6608 
6609 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6610 			  &prev_ps->eth.rx_discards,
6611 			  &cur_ps->eth.rx_discards);
6612 
6613 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6614 			  &prev_ps->eth.tx_bytes,
6615 			  &cur_ps->eth.tx_bytes);
6616 
6617 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6618 			  &prev_ps->eth.tx_unicast,
6619 			  &cur_ps->eth.tx_unicast);
6620 
6621 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6622 			  &prev_ps->eth.tx_multicast,
6623 			  &cur_ps->eth.tx_multicast);
6624 
6625 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6626 			  &prev_ps->eth.tx_broadcast,
6627 			  &cur_ps->eth.tx_broadcast);
6628 
6629 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6630 			  &prev_ps->tx_dropped_link_down,
6631 			  &cur_ps->tx_dropped_link_down);
6632 
6633 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6634 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6635 
6636 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6637 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6638 
6639 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6640 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6641 
6642 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6643 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6644 
6645 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6646 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6647 
6648 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6649 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6650 
6651 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6652 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6653 
6654 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6655 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6656 
6657 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6658 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6659 
6660 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6661 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6662 
6663 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6664 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6665 
6666 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6667 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6668 
6669 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6670 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6671 
6672 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6673 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6674 
6675 	fd_ctr_base = hw->fd_ctr_base;
6676 
6677 	ice_stat_update40(hw,
6678 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6679 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6680 			  &cur_ps->fd_sb_match);
6681 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6682 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6683 
6684 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6685 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6686 
6687 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6688 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6689 
6690 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6691 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6692 
6693 	ice_update_dcb_stats(pf);
6694 
6695 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6696 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
6697 
6698 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6699 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6700 
6701 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6702 			  &prev_ps->mac_local_faults,
6703 			  &cur_ps->mac_local_faults);
6704 
6705 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6706 			  &prev_ps->mac_remote_faults,
6707 			  &cur_ps->mac_remote_faults);
6708 
6709 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6710 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6711 
6712 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6713 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6714 
6715 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6716 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6717 
6718 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6719 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6720 
6721 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6722 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6723 
6724 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6725 
6726 	pf->stat_prev_loaded = true;
6727 }
6728 
6729 /**
6730  * ice_get_stats64 - get statistics for network device structure
6731  * @netdev: network interface device structure
6732  * @stats: main device statistics structure
6733  */
6734 static
6735 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6736 {
6737 	struct ice_netdev_priv *np = netdev_priv(netdev);
6738 	struct rtnl_link_stats64 *vsi_stats;
6739 	struct ice_vsi *vsi = np->vsi;
6740 
6741 	vsi_stats = &vsi->net_stats;
6742 
6743 	if (!vsi->num_txq || !vsi->num_rxq)
6744 		return;
6745 
6746 	/* netdev packet/byte stats come from ring counter. These are obtained
6747 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6748 	 * But, only call the update routine and read the registers if VSI is
6749 	 * not down.
6750 	 */
6751 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6752 		ice_update_vsi_ring_stats(vsi);
6753 	stats->tx_packets = vsi_stats->tx_packets;
6754 	stats->tx_bytes = vsi_stats->tx_bytes;
6755 	stats->rx_packets = vsi_stats->rx_packets;
6756 	stats->rx_bytes = vsi_stats->rx_bytes;
6757 
6758 	/* The rest of the stats can be read from the hardware but instead we
6759 	 * just return values that the watchdog task has already obtained from
6760 	 * the hardware.
6761 	 */
6762 	stats->multicast = vsi_stats->multicast;
6763 	stats->tx_errors = vsi_stats->tx_errors;
6764 	stats->tx_dropped = vsi_stats->tx_dropped;
6765 	stats->rx_errors = vsi_stats->rx_errors;
6766 	stats->rx_dropped = vsi_stats->rx_dropped;
6767 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6768 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6769 }
6770 
6771 /**
6772  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6773  * @vsi: VSI having NAPI disabled
6774  */
6775 static void ice_napi_disable_all(struct ice_vsi *vsi)
6776 {
6777 	int q_idx;
6778 
6779 	if (!vsi->netdev)
6780 		return;
6781 
6782 	ice_for_each_q_vector(vsi, q_idx) {
6783 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6784 
6785 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6786 			napi_disable(&q_vector->napi);
6787 
6788 		cancel_work_sync(&q_vector->tx.dim.work);
6789 		cancel_work_sync(&q_vector->rx.dim.work);
6790 	}
6791 }
6792 
6793 /**
6794  * ice_down - Shutdown the connection
6795  * @vsi: The VSI being stopped
6796  *
6797  * Caller of this function is expected to set the vsi->state ICE_DOWN bit
6798  */
6799 int ice_down(struct ice_vsi *vsi)
6800 {
6801 	int i, tx_err, rx_err, vlan_err = 0;
6802 
6803 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6804 
6805 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6806 		vlan_err = ice_vsi_del_vlan_zero(vsi);
6807 		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6808 		netif_carrier_off(vsi->netdev);
6809 		netif_tx_disable(vsi->netdev);
6810 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6811 		ice_eswitch_stop_all_tx_queues(vsi->back);
6812 	}
6813 
6814 	ice_vsi_dis_irq(vsi);
6815 
6816 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6817 	if (tx_err)
6818 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6819 			   vsi->vsi_num, tx_err);
6820 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6821 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6822 		if (tx_err)
6823 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6824 				   vsi->vsi_num, tx_err);
6825 	}
6826 
6827 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6828 	if (rx_err)
6829 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6830 			   vsi->vsi_num, rx_err);
6831 
6832 	ice_napi_disable_all(vsi);
6833 
6834 	ice_for_each_txq(vsi, i)
6835 		ice_clean_tx_ring(vsi->tx_rings[i]);
6836 
6837 	if (ice_is_xdp_ena_vsi(vsi))
6838 		ice_for_each_xdp_txq(vsi, i)
6839 			ice_clean_tx_ring(vsi->xdp_rings[i]);
6840 
6841 	ice_for_each_rxq(vsi, i)
6842 		ice_clean_rx_ring(vsi->rx_rings[i]);
6843 
6844 	if (tx_err || rx_err || vlan_err) {
6845 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6846 			   vsi->vsi_num, vsi->vsw->sw_id);
6847 		return -EIO;
6848 	}
6849 
6850 	return 0;
6851 }
6852 
6853 /**
6854  * ice_down_up - shutdown the VSI connection and bring it up
6855  * @vsi: the VSI to be reconnected
6856  */
6857 int ice_down_up(struct ice_vsi *vsi)
6858 {
6859 	int ret;
6860 
6861 	/* if DOWN already set, nothing to do */
6862 	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6863 		return 0;
6864 
6865 	ret = ice_down(vsi);
6866 	if (ret)
6867 		return ret;
6868 
6869 	ret = ice_up(vsi);
6870 	if (ret) {
6871 		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
6872 		return ret;
6873 	}
6874 
6875 	return 0;
6876 }
6877 
6878 /**
6879  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6880  * @vsi: VSI having resources allocated
6881  *
6882  * Return 0 on success, negative on failure
6883  */
6884 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6885 {
6886 	int i, err = 0;
6887 
6888 	if (!vsi->num_txq) {
6889 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6890 			vsi->vsi_num);
6891 		return -EINVAL;
6892 	}
6893 
6894 	ice_for_each_txq(vsi, i) {
6895 		struct ice_tx_ring *ring = vsi->tx_rings[i];
6896 
6897 		if (!ring)
6898 			return -EINVAL;
6899 
6900 		if (vsi->netdev)
6901 			ring->netdev = vsi->netdev;
6902 		err = ice_setup_tx_ring(ring);
6903 		if (err)
6904 			break;
6905 	}
6906 
6907 	return err;
6908 }
6909 
6910 /**
6911  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6912  * @vsi: VSI having resources allocated
6913  *
6914  * Return 0 on success, negative on failure
6915  */
6916 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6917 {
6918 	int i, err = 0;
6919 
6920 	if (!vsi->num_rxq) {
6921 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6922 			vsi->vsi_num);
6923 		return -EINVAL;
6924 	}
6925 
6926 	ice_for_each_rxq(vsi, i) {
6927 		struct ice_rx_ring *ring = vsi->rx_rings[i];
6928 
6929 		if (!ring)
6930 			return -EINVAL;
6931 
6932 		if (vsi->netdev)
6933 			ring->netdev = vsi->netdev;
6934 		err = ice_setup_rx_ring(ring);
6935 		if (err)
6936 			break;
6937 	}
6938 
6939 	return err;
6940 }
6941 
6942 /**
6943  * ice_vsi_open_ctrl - open control VSI for use
6944  * @vsi: the VSI to open
6945  *
6946  * Initialization of the Control VSI
6947  *
6948  * Returns 0 on success, negative value on error
6949  */
6950 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6951 {
6952 	char int_name[ICE_INT_NAME_STR_LEN];
6953 	struct ice_pf *pf = vsi->back;
6954 	struct device *dev;
6955 	int err;
6956 
6957 	dev = ice_pf_to_dev(pf);
6958 	/* allocate descriptors */
6959 	err = ice_vsi_setup_tx_rings(vsi);
6960 	if (err)
6961 		goto err_setup_tx;
6962 
6963 	err = ice_vsi_setup_rx_rings(vsi);
6964 	if (err)
6965 		goto err_setup_rx;
6966 
6967 	err = ice_vsi_cfg_lan(vsi);
6968 	if (err)
6969 		goto err_setup_rx;
6970 
6971 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6972 		 dev_driver_string(dev), dev_name(dev));
6973 	err = ice_vsi_req_irq_msix(vsi, int_name);
6974 	if (err)
6975 		goto err_setup_rx;
6976 
6977 	ice_vsi_cfg_msix(vsi);
6978 
6979 	err = ice_vsi_start_all_rx_rings(vsi);
6980 	if (err)
6981 		goto err_up_complete;
6982 
6983 	clear_bit(ICE_VSI_DOWN, vsi->state);
6984 	ice_vsi_ena_irq(vsi);
6985 
6986 	return 0;
6987 
6988 err_up_complete:
6989 	ice_down(vsi);
6990 err_setup_rx:
6991 	ice_vsi_free_rx_rings(vsi);
6992 err_setup_tx:
6993 	ice_vsi_free_tx_rings(vsi);
6994 
6995 	return err;
6996 }
6997 
6998 /**
6999  * ice_vsi_open - Called when a network interface is made active
7000  * @vsi: the VSI to open
7001  *
7002  * Initialization of the VSI
7003  *
7004  * Returns 0 on success, negative value on error
7005  */
7006 int ice_vsi_open(struct ice_vsi *vsi)
7007 {
7008 	char int_name[ICE_INT_NAME_STR_LEN];
7009 	struct ice_pf *pf = vsi->back;
7010 	int err;
7011 
7012 	/* allocate descriptors */
7013 	err = ice_vsi_setup_tx_rings(vsi);
7014 	if (err)
7015 		goto err_setup_tx;
7016 
7017 	err = ice_vsi_setup_rx_rings(vsi);
7018 	if (err)
7019 		goto err_setup_rx;
7020 
7021 	err = ice_vsi_cfg_lan(vsi);
7022 	if (err)
7023 		goto err_setup_rx;
7024 
7025 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7026 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7027 	err = ice_vsi_req_irq_msix(vsi, int_name);
7028 	if (err)
7029 		goto err_setup_rx;
7030 
7031 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7032 
7033 	if (vsi->type == ICE_VSI_PF) {
7034 		/* Notify the stack of the actual queue counts. */
7035 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7036 		if (err)
7037 			goto err_set_qs;
7038 
7039 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7040 		if (err)
7041 			goto err_set_qs;
7042 	}
7043 
7044 	err = ice_up_complete(vsi);
7045 	if (err)
7046 		goto err_up_complete;
7047 
7048 	return 0;
7049 
7050 err_up_complete:
7051 	ice_down(vsi);
7052 err_set_qs:
7053 	ice_vsi_free_irq(vsi);
7054 err_setup_rx:
7055 	ice_vsi_free_rx_rings(vsi);
7056 err_setup_tx:
7057 	ice_vsi_free_tx_rings(vsi);
7058 
7059 	return err;
7060 }
7061 
7062 /**
7063  * ice_vsi_release_all - Delete all VSIs
7064  * @pf: PF from which all VSIs are being removed
7065  */
7066 static void ice_vsi_release_all(struct ice_pf *pf)
7067 {
7068 	int err, i;
7069 
7070 	if (!pf->vsi)
7071 		return;
7072 
7073 	ice_for_each_vsi(pf, i) {
7074 		if (!pf->vsi[i])
7075 			continue;
7076 
7077 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7078 			continue;
7079 
7080 		err = ice_vsi_release(pf->vsi[i]);
7081 		if (err)
7082 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7083 				i, err, pf->vsi[i]->vsi_num);
7084 	}
7085 }
7086 
7087 /**
7088  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7089  * @pf: pointer to the PF instance
7090  * @type: VSI type to rebuild
7091  *
7092  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7093  */
7094 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7095 {
7096 	struct device *dev = ice_pf_to_dev(pf);
7097 	int i, err;
7098 
7099 	ice_for_each_vsi(pf, i) {
7100 		struct ice_vsi *vsi = pf->vsi[i];
7101 
7102 		if (!vsi || vsi->type != type)
7103 			continue;
7104 
7105 		/* rebuild the VSI */
7106 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7107 		if (err) {
7108 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7109 				err, vsi->idx, ice_vsi_type_str(type));
7110 			return err;
7111 		}
7112 
7113 		/* replay filters for the VSI */
7114 		err = ice_replay_vsi(&pf->hw, vsi->idx);
7115 		if (err) {
7116 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7117 				err, vsi->idx, ice_vsi_type_str(type));
7118 			return err;
7119 		}
7120 
7121 		/* Re-map HW VSI number, using VSI handle that has been
7122 		 * previously validated in ice_replay_vsi() call above
7123 		 */
7124 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7125 
7126 		/* enable the VSI */
7127 		err = ice_ena_vsi(vsi, false);
7128 		if (err) {
7129 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7130 				err, vsi->idx, ice_vsi_type_str(type));
7131 			return err;
7132 		}
7133 
7134 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7135 			 ice_vsi_type_str(type));
7136 	}
7137 
7138 	return 0;
7139 }
7140 
7141 /**
7142  * ice_update_pf_netdev_link - Update PF netdev link status
7143  * @pf: pointer to the PF instance
7144  */
7145 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7146 {
7147 	bool link_up;
7148 	int i;
7149 
7150 	ice_for_each_vsi(pf, i) {
7151 		struct ice_vsi *vsi = pf->vsi[i];
7152 
7153 		if (!vsi || vsi->type != ICE_VSI_PF)
7154 			return;
7155 
7156 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7157 		if (link_up) {
7158 			netif_carrier_on(pf->vsi[i]->netdev);
7159 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7160 		} else {
7161 			netif_carrier_off(pf->vsi[i]->netdev);
7162 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7163 		}
7164 	}
7165 }
7166 
7167 /**
7168  * ice_rebuild - rebuild after reset
7169  * @pf: PF to rebuild
7170  * @reset_type: type of reset
7171  *
7172  * Do not rebuild VF VSI in this flow because that is already handled via
7173  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
7174  * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
7175  * to reset/rebuild all the VF VSI twice.
7176  */
7177 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7178 {
7179 	struct device *dev = ice_pf_to_dev(pf);
7180 	struct ice_hw *hw = &pf->hw;
7181 	bool dvm;
7182 	int err;
7183 
7184 	if (test_bit(ICE_DOWN, pf->state))
7185 		goto clear_recovery;
7186 
7187 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7188 
7189 #define ICE_EMP_RESET_SLEEP_MS 5000
7190 	if (reset_type == ICE_RESET_EMPR) {
7191 		/* If an EMP reset has occurred, any previously pending flash
7192 		 * update will have completed. We no longer know whether or
7193 		 * not the NVM update EMP reset is restricted.
7194 		 */
7195 		pf->fw_emp_reset_disabled = false;
7196 
7197 		msleep(ICE_EMP_RESET_SLEEP_MS);
7198 	}
7199 
7200 	err = ice_init_all_ctrlq(hw);
7201 	if (err) {
7202 		dev_err(dev, "control queues init failed %d\n", err);
7203 		goto err_init_ctrlq;
7204 	}
7205 
7206 	/* if DDP was previously loaded successfully */
7207 	if (!ice_is_safe_mode(pf)) {
7208 		/* reload the SW DB of filter tables */
7209 		if (reset_type == ICE_RESET_PFR)
7210 			ice_fill_blk_tbls(hw);
7211 		else
7212 			/* Reload DDP Package after CORER/GLOBR reset */
7213 			ice_load_pkg(NULL, pf);
7214 	}
7215 
7216 	err = ice_clear_pf_cfg(hw);
7217 	if (err) {
7218 		dev_err(dev, "clear PF configuration failed %d\n", err);
7219 		goto err_init_ctrlq;
7220 	}
7221 
7222 	ice_clear_pxe_mode(hw);
7223 
7224 	err = ice_init_nvm(hw);
7225 	if (err) {
7226 		dev_err(dev, "ice_init_nvm failed %d\n", err);
7227 		goto err_init_ctrlq;
7228 	}
7229 
7230 	err = ice_get_caps(hw);
7231 	if (err) {
7232 		dev_err(dev, "ice_get_caps failed %d\n", err);
7233 		goto err_init_ctrlq;
7234 	}
7235 
7236 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7237 	if (err) {
7238 		dev_err(dev, "set_mac_cfg failed %d\n", err);
7239 		goto err_init_ctrlq;
7240 	}
7241 
7242 	dvm = ice_is_dvm_ena(hw);
7243 
7244 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7245 	if (err)
7246 		goto err_init_ctrlq;
7247 
7248 	err = ice_sched_init_port(hw->port_info);
7249 	if (err)
7250 		goto err_sched_init_port;
7251 
7252 	/* start misc vector */
7253 	err = ice_req_irq_msix_misc(pf);
7254 	if (err) {
7255 		dev_err(dev, "misc vector setup failed: %d\n", err);
7256 		goto err_sched_init_port;
7257 	}
7258 
7259 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7260 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7261 		if (!rd32(hw, PFQF_FD_SIZE)) {
7262 			u16 unused, guar, b_effort;
7263 
7264 			guar = hw->func_caps.fd_fltr_guar;
7265 			b_effort = hw->func_caps.fd_fltr_best_effort;
7266 
7267 			/* force guaranteed filter pool for PF */
7268 			ice_alloc_fd_guar_item(hw, &unused, guar);
7269 			/* force shared filter pool for PF */
7270 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7271 		}
7272 	}
7273 
7274 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7275 		ice_dcb_rebuild(pf);
7276 
7277 	/* If the PF previously had enabled PTP, PTP init needs to happen before
7278 	 * the VSI rebuild. If not, this causes the PTP link status events to
7279 	 * fail.
7280 	 */
7281 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7282 		ice_ptp_reset(pf);
7283 
7284 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7285 		ice_gnss_init(pf);
7286 
7287 	/* rebuild PF VSI */
7288 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7289 	if (err) {
7290 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7291 		goto err_vsi_rebuild;
7292 	}
7293 
7294 	/* configure PTP timestamping after VSI rebuild */
7295 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7296 		ice_ptp_cfg_timestamp(pf, false);
7297 
7298 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7299 	if (err) {
7300 		dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7301 		goto err_vsi_rebuild;
7302 	}
7303 
7304 	if (reset_type == ICE_RESET_PFR) {
7305 		err = ice_rebuild_channels(pf);
7306 		if (err) {
7307 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7308 				err);
7309 			goto err_vsi_rebuild;
7310 		}
7311 	}
7312 
7313 	/* If Flow Director is active */
7314 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7315 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7316 		if (err) {
7317 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7318 			goto err_vsi_rebuild;
7319 		}
7320 
7321 		/* replay HW Flow Director recipes */
7322 		if (hw->fdir_prof)
7323 			ice_fdir_replay_flows(hw);
7324 
7325 		/* replay Flow Director filters */
7326 		ice_fdir_replay_fltrs(pf);
7327 
7328 		ice_rebuild_arfs(pf);
7329 	}
7330 
7331 	ice_update_pf_netdev_link(pf);
7332 
7333 	/* tell the firmware we are up */
7334 	err = ice_send_version(pf);
7335 	if (err) {
7336 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7337 			err);
7338 		goto err_vsi_rebuild;
7339 	}
7340 
7341 	ice_replay_post(hw);
7342 
7343 	/* if we get here, reset flow is successful */
7344 	clear_bit(ICE_RESET_FAILED, pf->state);
7345 
7346 	ice_plug_aux_dev(pf);
7347 	return;
7348 
7349 err_vsi_rebuild:
7350 err_sched_init_port:
7351 	ice_sched_cleanup_all(hw);
7352 err_init_ctrlq:
7353 	ice_shutdown_all_ctrlq(hw);
7354 	set_bit(ICE_RESET_FAILED, pf->state);
7355 clear_recovery:
7356 	/* set this bit in PF state to control service task scheduling */
7357 	set_bit(ICE_NEEDS_RESTART, pf->state);
7358 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7359 }
7360 
7361 /**
7362  * ice_change_mtu - NDO callback to change the MTU
7363  * @netdev: network interface device structure
7364  * @new_mtu: new value for maximum frame size
7365  *
7366  * Returns 0 on success, negative on failure
7367  */
7368 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7369 {
7370 	struct ice_netdev_priv *np = netdev_priv(netdev);
7371 	struct ice_vsi *vsi = np->vsi;
7372 	struct ice_pf *pf = vsi->back;
7373 	struct bpf_prog *prog;
7374 	u8 count = 0;
7375 	int err = 0;
7376 
7377 	if (new_mtu == (int)netdev->mtu) {
7378 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7379 		return 0;
7380 	}
7381 
7382 	prog = vsi->xdp_prog;
7383 	if (prog && !prog->aux->xdp_has_frags) {
7384 		int frame_size = ice_max_xdp_frame_size(vsi);
7385 
7386 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7387 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7388 				   frame_size - ICE_ETH_PKT_HDR_PAD);
7389 			return -EINVAL;
7390 		}
7391 	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7392 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7393 			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7394 				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7395 			return -EINVAL;
7396 		}
7397 	}
7398 
7399 	/* if a reset is in progress, wait for some time for it to complete */
7400 	do {
7401 		if (ice_is_reset_in_progress(pf->state)) {
7402 			count++;
7403 			usleep_range(1000, 2000);
7404 		} else {
7405 			break;
7406 		}
7407 
7408 	} while (count < 100);
7409 
7410 	if (count == 100) {
7411 		netdev_err(netdev, "can't change MTU. Device is busy\n");
7412 		return -EBUSY;
7413 	}
7414 
7415 	netdev->mtu = (unsigned int)new_mtu;
7416 
7417 	/* if VSI is up, bring it down and then back up */
7418 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
7419 		err = ice_down(vsi);
7420 		if (err) {
7421 			netdev_err(netdev, "change MTU if_down err %d\n", err);
7422 			return err;
7423 		}
7424 
7425 		err = ice_up(vsi);
7426 		if (err) {
7427 			netdev_err(netdev, "change MTU if_up err %d\n", err);
7428 			return err;
7429 		}
7430 	}
7431 
7432 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7433 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7434 
7435 	return err;
7436 }
7437 
7438 /**
7439  * ice_eth_ioctl - Access the hwtstamp interface
7440  * @netdev: network interface device structure
7441  * @ifr: interface request data
7442  * @cmd: ioctl command
7443  */
7444 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7445 {
7446 	struct ice_netdev_priv *np = netdev_priv(netdev);
7447 	struct ice_pf *pf = np->vsi->back;
7448 
7449 	switch (cmd) {
7450 	case SIOCGHWTSTAMP:
7451 		return ice_ptp_get_ts_config(pf, ifr);
7452 	case SIOCSHWTSTAMP:
7453 		return ice_ptp_set_ts_config(pf, ifr);
7454 	default:
7455 		return -EOPNOTSUPP;
7456 	}
7457 }
7458 
7459 /**
7460  * ice_aq_str - convert AQ err code to a string
7461  * @aq_err: the AQ error code to convert
7462  */
7463 const char *ice_aq_str(enum ice_aq_err aq_err)
7464 {
7465 	switch (aq_err) {
7466 	case ICE_AQ_RC_OK:
7467 		return "OK";
7468 	case ICE_AQ_RC_EPERM:
7469 		return "ICE_AQ_RC_EPERM";
7470 	case ICE_AQ_RC_ENOENT:
7471 		return "ICE_AQ_RC_ENOENT";
7472 	case ICE_AQ_RC_ENOMEM:
7473 		return "ICE_AQ_RC_ENOMEM";
7474 	case ICE_AQ_RC_EBUSY:
7475 		return "ICE_AQ_RC_EBUSY";
7476 	case ICE_AQ_RC_EEXIST:
7477 		return "ICE_AQ_RC_EEXIST";
7478 	case ICE_AQ_RC_EINVAL:
7479 		return "ICE_AQ_RC_EINVAL";
7480 	case ICE_AQ_RC_ENOSPC:
7481 		return "ICE_AQ_RC_ENOSPC";
7482 	case ICE_AQ_RC_ENOSYS:
7483 		return "ICE_AQ_RC_ENOSYS";
7484 	case ICE_AQ_RC_EMODE:
7485 		return "ICE_AQ_RC_EMODE";
7486 	case ICE_AQ_RC_ENOSEC:
7487 		return "ICE_AQ_RC_ENOSEC";
7488 	case ICE_AQ_RC_EBADSIG:
7489 		return "ICE_AQ_RC_EBADSIG";
7490 	case ICE_AQ_RC_ESVN:
7491 		return "ICE_AQ_RC_ESVN";
7492 	case ICE_AQ_RC_EBADMAN:
7493 		return "ICE_AQ_RC_EBADMAN";
7494 	case ICE_AQ_RC_EBADBUF:
7495 		return "ICE_AQ_RC_EBADBUF";
7496 	}
7497 
7498 	return "ICE_AQ_RC_UNKNOWN";
7499 }
7500 
7501 /**
7502  * ice_set_rss_lut - Set RSS LUT
7503  * @vsi: Pointer to VSI structure
7504  * @lut: Lookup table
7505  * @lut_size: Lookup table size
7506  *
7507  * Returns 0 on success, negative on failure
7508  */
7509 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7510 {
7511 	struct ice_aq_get_set_rss_lut_params params = {};
7512 	struct ice_hw *hw = &vsi->back->hw;
7513 	int status;
7514 
7515 	if (!lut)
7516 		return -EINVAL;
7517 
7518 	params.vsi_handle = vsi->idx;
7519 	params.lut_size = lut_size;
7520 	params.lut_type = vsi->rss_lut_type;
7521 	params.lut = lut;
7522 
7523 	status = ice_aq_set_rss_lut(hw, &params);
7524 	if (status)
7525 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7526 			status, ice_aq_str(hw->adminq.sq_last_status));
7527 
7528 	return status;
7529 }
7530 
7531 /**
7532  * ice_set_rss_key - Set RSS key
7533  * @vsi: Pointer to the VSI structure
7534  * @seed: RSS hash seed
7535  *
7536  * Returns 0 on success, negative on failure
7537  */
7538 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7539 {
7540 	struct ice_hw *hw = &vsi->back->hw;
7541 	int status;
7542 
7543 	if (!seed)
7544 		return -EINVAL;
7545 
7546 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7547 	if (status)
7548 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7549 			status, ice_aq_str(hw->adminq.sq_last_status));
7550 
7551 	return status;
7552 }
7553 
7554 /**
7555  * ice_get_rss_lut - Get RSS LUT
7556  * @vsi: Pointer to VSI structure
7557  * @lut: Buffer to store the lookup table entries
7558  * @lut_size: Size of buffer to store the lookup table entries
7559  *
7560  * Returns 0 on success, negative on failure
7561  */
7562 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7563 {
7564 	struct ice_aq_get_set_rss_lut_params params = {};
7565 	struct ice_hw *hw = &vsi->back->hw;
7566 	int status;
7567 
7568 	if (!lut)
7569 		return -EINVAL;
7570 
7571 	params.vsi_handle = vsi->idx;
7572 	params.lut_size = lut_size;
7573 	params.lut_type = vsi->rss_lut_type;
7574 	params.lut = lut;
7575 
7576 	status = ice_aq_get_rss_lut(hw, &params);
7577 	if (status)
7578 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7579 			status, ice_aq_str(hw->adminq.sq_last_status));
7580 
7581 	return status;
7582 }
7583 
7584 /**
7585  * ice_get_rss_key - Get RSS key
7586  * @vsi: Pointer to VSI structure
7587  * @seed: Buffer to store the key in
7588  *
7589  * Returns 0 on success, negative on failure
7590  */
7591 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7592 {
7593 	struct ice_hw *hw = &vsi->back->hw;
7594 	int status;
7595 
7596 	if (!seed)
7597 		return -EINVAL;
7598 
7599 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7600 	if (status)
7601 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7602 			status, ice_aq_str(hw->adminq.sq_last_status));
7603 
7604 	return status;
7605 }
7606 
7607 /**
7608  * ice_bridge_getlink - Get the hardware bridge mode
7609  * @skb: skb buff
7610  * @pid: process ID
7611  * @seq: RTNL message seq
7612  * @dev: the netdev being configured
7613  * @filter_mask: filter mask passed in
7614  * @nlflags: netlink flags passed in
7615  *
7616  * Return the bridge mode (VEB/VEPA)
7617  */
7618 static int
7619 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7620 		   struct net_device *dev, u32 filter_mask, int nlflags)
7621 {
7622 	struct ice_netdev_priv *np = netdev_priv(dev);
7623 	struct ice_vsi *vsi = np->vsi;
7624 	struct ice_pf *pf = vsi->back;
7625 	u16 bmode;
7626 
7627 	bmode = pf->first_sw->bridge_mode;
7628 
7629 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7630 				       filter_mask, NULL);
7631 }
7632 
7633 /**
7634  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7635  * @vsi: Pointer to VSI structure
7636  * @bmode: Hardware bridge mode (VEB/VEPA)
7637  *
7638  * Returns 0 on success, negative on failure
7639  */
7640 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7641 {
7642 	struct ice_aqc_vsi_props *vsi_props;
7643 	struct ice_hw *hw = &vsi->back->hw;
7644 	struct ice_vsi_ctx *ctxt;
7645 	int ret;
7646 
7647 	vsi_props = &vsi->info;
7648 
7649 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7650 	if (!ctxt)
7651 		return -ENOMEM;
7652 
7653 	ctxt->info = vsi->info;
7654 
7655 	if (bmode == BRIDGE_MODE_VEB)
7656 		/* change from VEPA to VEB mode */
7657 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7658 	else
7659 		/* change from VEB to VEPA mode */
7660 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7661 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7662 
7663 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7664 	if (ret) {
7665 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7666 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7667 		goto out;
7668 	}
7669 	/* Update sw flags for book keeping */
7670 	vsi_props->sw_flags = ctxt->info.sw_flags;
7671 
7672 out:
7673 	kfree(ctxt);
7674 	return ret;
7675 }
7676 
7677 /**
7678  * ice_bridge_setlink - Set the hardware bridge mode
7679  * @dev: the netdev being configured
7680  * @nlh: RTNL message
7681  * @flags: bridge setlink flags
7682  * @extack: netlink extended ack
7683  *
7684  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7685  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
7686  * not already set for all VSIs connected to this switch. And also update the
7687  * unicast switch filter rules for the corresponding switch of the netdev.
7688  */
7689 static int
7690 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7691 		   u16 __always_unused flags,
7692 		   struct netlink_ext_ack __always_unused *extack)
7693 {
7694 	struct ice_netdev_priv *np = netdev_priv(dev);
7695 	struct ice_pf *pf = np->vsi->back;
7696 	struct nlattr *attr, *br_spec;
7697 	struct ice_hw *hw = &pf->hw;
7698 	struct ice_sw *pf_sw;
7699 	int rem, v, err = 0;
7700 
7701 	pf_sw = pf->first_sw;
7702 	/* find the attribute in the netlink message */
7703 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7704 
7705 	nla_for_each_nested(attr, br_spec, rem) {
7706 		__u16 mode;
7707 
7708 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7709 			continue;
7710 		mode = nla_get_u16(attr);
7711 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7712 			return -EINVAL;
7713 		/* Continue  if bridge mode is not being flipped */
7714 		if (mode == pf_sw->bridge_mode)
7715 			continue;
7716 		/* Iterates through the PF VSI list and update the loopback
7717 		 * mode of the VSI
7718 		 */
7719 		ice_for_each_vsi(pf, v) {
7720 			if (!pf->vsi[v])
7721 				continue;
7722 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7723 			if (err)
7724 				return err;
7725 		}
7726 
7727 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7728 		/* Update the unicast switch filter rules for the corresponding
7729 		 * switch of the netdev
7730 		 */
7731 		err = ice_update_sw_rule_bridge_mode(hw);
7732 		if (err) {
7733 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7734 				   mode, err,
7735 				   ice_aq_str(hw->adminq.sq_last_status));
7736 			/* revert hw->evb_veb */
7737 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7738 			return err;
7739 		}
7740 
7741 		pf_sw->bridge_mode = mode;
7742 	}
7743 
7744 	return 0;
7745 }
7746 
7747 /**
7748  * ice_tx_timeout - Respond to a Tx Hang
7749  * @netdev: network interface device structure
7750  * @txqueue: Tx queue
7751  */
7752 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7753 {
7754 	struct ice_netdev_priv *np = netdev_priv(netdev);
7755 	struct ice_tx_ring *tx_ring = NULL;
7756 	struct ice_vsi *vsi = np->vsi;
7757 	struct ice_pf *pf = vsi->back;
7758 	u32 i;
7759 
7760 	pf->tx_timeout_count++;
7761 
7762 	/* Check if PFC is enabled for the TC to which the queue belongs
7763 	 * to. If yes then Tx timeout is not caused by a hung queue, no
7764 	 * need to reset and rebuild
7765 	 */
7766 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7767 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7768 			 txqueue);
7769 		return;
7770 	}
7771 
7772 	/* now that we have an index, find the tx_ring struct */
7773 	ice_for_each_txq(vsi, i)
7774 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7775 			if (txqueue == vsi->tx_rings[i]->q_index) {
7776 				tx_ring = vsi->tx_rings[i];
7777 				break;
7778 			}
7779 
7780 	/* Reset recovery level if enough time has elapsed after last timeout.
7781 	 * Also ensure no new reset action happens before next timeout period.
7782 	 */
7783 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7784 		pf->tx_timeout_recovery_level = 1;
7785 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7786 				       netdev->watchdog_timeo)))
7787 		return;
7788 
7789 	if (tx_ring) {
7790 		struct ice_hw *hw = &pf->hw;
7791 		u32 head, val = 0;
7792 
7793 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7794 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7795 		/* Read interrupt register */
7796 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7797 
7798 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7799 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7800 			    head, tx_ring->next_to_use, val);
7801 	}
7802 
7803 	pf->tx_timeout_last_recovery = jiffies;
7804 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7805 		    pf->tx_timeout_recovery_level, txqueue);
7806 
7807 	switch (pf->tx_timeout_recovery_level) {
7808 	case 1:
7809 		set_bit(ICE_PFR_REQ, pf->state);
7810 		break;
7811 	case 2:
7812 		set_bit(ICE_CORER_REQ, pf->state);
7813 		break;
7814 	case 3:
7815 		set_bit(ICE_GLOBR_REQ, pf->state);
7816 		break;
7817 	default:
7818 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7819 		set_bit(ICE_DOWN, pf->state);
7820 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7821 		set_bit(ICE_SERVICE_DIS, pf->state);
7822 		break;
7823 	}
7824 
7825 	ice_service_task_schedule(pf);
7826 	pf->tx_timeout_recovery_level++;
7827 }
7828 
7829 /**
7830  * ice_setup_tc_cls_flower - flower classifier offloads
7831  * @np: net device to configure
7832  * @filter_dev: device on which filter is added
7833  * @cls_flower: offload data
7834  */
7835 static int
7836 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7837 			struct net_device *filter_dev,
7838 			struct flow_cls_offload *cls_flower)
7839 {
7840 	struct ice_vsi *vsi = np->vsi;
7841 
7842 	if (cls_flower->common.chain_index)
7843 		return -EOPNOTSUPP;
7844 
7845 	switch (cls_flower->command) {
7846 	case FLOW_CLS_REPLACE:
7847 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7848 	case FLOW_CLS_DESTROY:
7849 		return ice_del_cls_flower(vsi, cls_flower);
7850 	default:
7851 		return -EINVAL;
7852 	}
7853 }
7854 
7855 /**
7856  * ice_setup_tc_block_cb - callback handler registered for TC block
7857  * @type: TC SETUP type
7858  * @type_data: TC flower offload data that contains user input
7859  * @cb_priv: netdev private data
7860  */
7861 static int
7862 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7863 {
7864 	struct ice_netdev_priv *np = cb_priv;
7865 
7866 	switch (type) {
7867 	case TC_SETUP_CLSFLOWER:
7868 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7869 					       type_data);
7870 	default:
7871 		return -EOPNOTSUPP;
7872 	}
7873 }
7874 
7875 /**
7876  * ice_validate_mqprio_qopt - Validate TCF input parameters
7877  * @vsi: Pointer to VSI
7878  * @mqprio_qopt: input parameters for mqprio queue configuration
7879  *
7880  * This function validates MQPRIO params, such as qcount (power of 2 wherever
7881  * needed), and make sure user doesn't specify qcount and BW rate limit
7882  * for TCs, which are more than "num_tc"
7883  */
7884 static int
7885 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7886 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
7887 {
7888 	u64 sum_max_rate = 0, sum_min_rate = 0;
7889 	int non_power_of_2_qcount = 0;
7890 	struct ice_pf *pf = vsi->back;
7891 	int max_rss_q_cnt = 0;
7892 	struct device *dev;
7893 	int i, speed;
7894 	u8 num_tc;
7895 
7896 	if (vsi->type != ICE_VSI_PF)
7897 		return -EINVAL;
7898 
7899 	if (mqprio_qopt->qopt.offset[0] != 0 ||
7900 	    mqprio_qopt->qopt.num_tc < 1 ||
7901 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7902 		return -EINVAL;
7903 
7904 	dev = ice_pf_to_dev(pf);
7905 	vsi->ch_rss_size = 0;
7906 	num_tc = mqprio_qopt->qopt.num_tc;
7907 
7908 	for (i = 0; num_tc; i++) {
7909 		int qcount = mqprio_qopt->qopt.count[i];
7910 		u64 max_rate, min_rate, rem;
7911 
7912 		if (!qcount)
7913 			return -EINVAL;
7914 
7915 		if (is_power_of_2(qcount)) {
7916 			if (non_power_of_2_qcount &&
7917 			    qcount > non_power_of_2_qcount) {
7918 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7919 					qcount, non_power_of_2_qcount);
7920 				return -EINVAL;
7921 			}
7922 			if (qcount > max_rss_q_cnt)
7923 				max_rss_q_cnt = qcount;
7924 		} else {
7925 			if (non_power_of_2_qcount &&
7926 			    qcount != non_power_of_2_qcount) {
7927 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7928 					qcount, non_power_of_2_qcount);
7929 				return -EINVAL;
7930 			}
7931 			if (qcount < max_rss_q_cnt) {
7932 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7933 					qcount, max_rss_q_cnt);
7934 				return -EINVAL;
7935 			}
7936 			max_rss_q_cnt = qcount;
7937 			non_power_of_2_qcount = qcount;
7938 		}
7939 
7940 		/* TC command takes input in K/N/Gbps or K/M/Gbit etc but
7941 		 * converts the bandwidth rate limit into Bytes/s when
7942 		 * passing it down to the driver. So convert input bandwidth
7943 		 * from Bytes/s to Kbps
7944 		 */
7945 		max_rate = mqprio_qopt->max_rate[i];
7946 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7947 		sum_max_rate += max_rate;
7948 
7949 		/* min_rate is minimum guaranteed rate and it can't be zero */
7950 		min_rate = mqprio_qopt->min_rate[i];
7951 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7952 		sum_min_rate += min_rate;
7953 
7954 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7955 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7956 				min_rate, ICE_MIN_BW_LIMIT);
7957 			return -EINVAL;
7958 		}
7959 
7960 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7961 		if (rem) {
7962 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7963 				i, ICE_MIN_BW_LIMIT);
7964 			return -EINVAL;
7965 		}
7966 
7967 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7968 		if (rem) {
7969 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7970 				i, ICE_MIN_BW_LIMIT);
7971 			return -EINVAL;
7972 		}
7973 
7974 		/* min_rate can't be more than max_rate, except when max_rate
7975 		 * is zero (implies max_rate sought is max line rate). In such
7976 		 * a case min_rate can be more than max.
7977 		 */
7978 		if (max_rate && min_rate > max_rate) {
7979 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7980 				min_rate, max_rate);
7981 			return -EINVAL;
7982 		}
7983 
7984 		if (i >= mqprio_qopt->qopt.num_tc - 1)
7985 			break;
7986 		if (mqprio_qopt->qopt.offset[i + 1] !=
7987 		    (mqprio_qopt->qopt.offset[i] + qcount))
7988 			return -EINVAL;
7989 	}
7990 	if (vsi->num_rxq <
7991 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7992 		return -EINVAL;
7993 	if (vsi->num_txq <
7994 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7995 		return -EINVAL;
7996 
7997 	speed = ice_get_link_speed_kbps(vsi);
7998 	if (sum_max_rate && sum_max_rate > (u64)speed) {
7999 		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
8000 			sum_max_rate, speed);
8001 		return -EINVAL;
8002 	}
8003 	if (sum_min_rate && sum_min_rate > (u64)speed) {
8004 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8005 			sum_min_rate, speed);
8006 		return -EINVAL;
8007 	}
8008 
8009 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8010 	vsi->ch_rss_size = max_rss_q_cnt;
8011 
8012 	return 0;
8013 }
8014 
8015 /**
8016  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8017  * @pf: ptr to PF device
8018  * @vsi: ptr to VSI
8019  */
8020 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8021 {
8022 	struct device *dev = ice_pf_to_dev(pf);
8023 	bool added = false;
8024 	struct ice_hw *hw;
8025 	int flow;
8026 
8027 	if (!(vsi->num_gfltr || vsi->num_bfltr))
8028 		return -EINVAL;
8029 
8030 	hw = &pf->hw;
8031 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8032 		struct ice_fd_hw_prof *prof;
8033 		int tun, status;
8034 		u64 entry_h;
8035 
8036 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8037 		      hw->fdir_prof[flow]->cnt))
8038 			continue;
8039 
8040 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8041 			enum ice_flow_priority prio;
8042 			u64 prof_id;
8043 
8044 			/* add this VSI to FDir profile for this flow */
8045 			prio = ICE_FLOW_PRIO_NORMAL;
8046 			prof = hw->fdir_prof[flow];
8047 			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8048 			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8049 						    prof->vsi_h[0], vsi->idx,
8050 						    prio, prof->fdir_seg[tun],
8051 						    &entry_h);
8052 			if (status) {
8053 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8054 					vsi->idx, flow);
8055 				continue;
8056 			}
8057 
8058 			prof->entry_h[prof->cnt][tun] = entry_h;
8059 		}
8060 
8061 		/* store VSI for filter replay and delete */
8062 		prof->vsi_h[prof->cnt] = vsi->idx;
8063 		prof->cnt++;
8064 
8065 		added = true;
8066 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8067 			flow);
8068 	}
8069 
8070 	if (!added)
8071 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8072 
8073 	return 0;
8074 }
8075 
8076 /**
8077  * ice_add_channel - add a channel by adding VSI
8078  * @pf: ptr to PF device
8079  * @sw_id: underlying HW switching element ID
8080  * @ch: ptr to channel structure
8081  *
8082  * Add a channel (VSI) using add_vsi and queue_map
8083  */
8084 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8085 {
8086 	struct device *dev = ice_pf_to_dev(pf);
8087 	struct ice_vsi *vsi;
8088 
8089 	if (ch->type != ICE_VSI_CHNL) {
8090 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8091 		return -EINVAL;
8092 	}
8093 
8094 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8095 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8096 		dev_err(dev, "create chnl VSI failure\n");
8097 		return -EINVAL;
8098 	}
8099 
8100 	ice_add_vsi_to_fdir(pf, vsi);
8101 
8102 	ch->sw_id = sw_id;
8103 	ch->vsi_num = vsi->vsi_num;
8104 	ch->info.mapping_flags = vsi->info.mapping_flags;
8105 	ch->ch_vsi = vsi;
8106 	/* set the back pointer of channel for newly created VSI */
8107 	vsi->ch = ch;
8108 
8109 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8110 	       sizeof(vsi->info.q_mapping));
8111 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8112 	       sizeof(vsi->info.tc_mapping));
8113 
8114 	return 0;
8115 }
8116 
8117 /**
8118  * ice_chnl_cfg_res
8119  * @vsi: the VSI being setup
8120  * @ch: ptr to channel structure
8121  *
8122  * Configure channel specific resources such as rings, vector.
8123  */
8124 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8125 {
8126 	int i;
8127 
8128 	for (i = 0; i < ch->num_txq; i++) {
8129 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8130 		struct ice_ring_container *rc;
8131 		struct ice_tx_ring *tx_ring;
8132 		struct ice_rx_ring *rx_ring;
8133 
8134 		tx_ring = vsi->tx_rings[ch->base_q + i];
8135 		rx_ring = vsi->rx_rings[ch->base_q + i];
8136 		if (!tx_ring || !rx_ring)
8137 			continue;
8138 
8139 		/* setup ring being channel enabled */
8140 		tx_ring->ch = ch;
8141 		rx_ring->ch = ch;
8142 
8143 		/* following code block sets up vector specific attributes */
8144 		tx_q_vector = tx_ring->q_vector;
8145 		rx_q_vector = rx_ring->q_vector;
8146 		if (!tx_q_vector && !rx_q_vector)
8147 			continue;
8148 
8149 		if (tx_q_vector) {
8150 			tx_q_vector->ch = ch;
8151 			/* setup Tx and Rx ITR setting if DIM is off */
8152 			rc = &tx_q_vector->tx;
8153 			if (!ITR_IS_DYNAMIC(rc))
8154 				ice_write_itr(rc, rc->itr_setting);
8155 		}
8156 		if (rx_q_vector) {
8157 			rx_q_vector->ch = ch;
8158 			/* setup Tx and Rx ITR setting if DIM is off */
8159 			rc = &rx_q_vector->rx;
8160 			if (!ITR_IS_DYNAMIC(rc))
8161 				ice_write_itr(rc, rc->itr_setting);
8162 		}
8163 	}
8164 
8165 	/* it is safe to assume that, if channel has non-zero num_t[r]xq, then
8166 	 * GLINT_ITR register would have written to perform in-context
8167 	 * update, hence perform flush
8168 	 */
8169 	if (ch->num_txq || ch->num_rxq)
8170 		ice_flush(&vsi->back->hw);
8171 }
8172 
8173 /**
8174  * ice_cfg_chnl_all_res - configure channel resources
8175  * @vsi: pte to main_vsi
8176  * @ch: ptr to channel structure
8177  *
8178  * This function configures channel specific resources such as flow-director
8179  * counter index, and other resources such as queues, vectors, ITR settings
8180  */
8181 static void
8182 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8183 {
8184 	/* configure channel (aka ADQ) resources such as queues, vectors,
8185 	 * ITR settings for channel specific vectors and anything else
8186 	 */
8187 	ice_chnl_cfg_res(vsi, ch);
8188 }
8189 
8190 /**
8191  * ice_setup_hw_channel - setup new channel
8192  * @pf: ptr to PF device
8193  * @vsi: the VSI being setup
8194  * @ch: ptr to channel structure
8195  * @sw_id: underlying HW switching element ID
8196  * @type: type of channel to be created (VMDq2/VF)
8197  *
8198  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8199  * and configures Tx rings accordingly
8200  */
8201 static int
8202 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8203 		     struct ice_channel *ch, u16 sw_id, u8 type)
8204 {
8205 	struct device *dev = ice_pf_to_dev(pf);
8206 	int ret;
8207 
8208 	ch->base_q = vsi->next_base_q;
8209 	ch->type = type;
8210 
8211 	ret = ice_add_channel(pf, sw_id, ch);
8212 	if (ret) {
8213 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8214 		return ret;
8215 	}
8216 
8217 	/* configure/setup ADQ specific resources */
8218 	ice_cfg_chnl_all_res(vsi, ch);
8219 
8220 	/* make sure to update the next_base_q so that subsequent channel's
8221 	 * (aka ADQ) VSI queue map is correct
8222 	 */
8223 	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8224 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8225 		ch->num_rxq);
8226 
8227 	return 0;
8228 }
8229 
8230 /**
8231  * ice_setup_channel - setup new channel using uplink element
8232  * @pf: ptr to PF device
8233  * @vsi: the VSI being setup
8234  * @ch: ptr to channel structure
8235  *
8236  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8237  * and uplink switching element
8238  */
8239 static bool
8240 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8241 		  struct ice_channel *ch)
8242 {
8243 	struct device *dev = ice_pf_to_dev(pf);
8244 	u16 sw_id;
8245 	int ret;
8246 
8247 	if (vsi->type != ICE_VSI_PF) {
8248 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8249 		return false;
8250 	}
8251 
8252 	sw_id = pf->first_sw->sw_id;
8253 
8254 	/* create channel (VSI) */
8255 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8256 	if (ret) {
8257 		dev_err(dev, "failed to setup hw_channel\n");
8258 		return false;
8259 	}
8260 	dev_dbg(dev, "successfully created channel()\n");
8261 
8262 	return ch->ch_vsi ? true : false;
8263 }
8264 
8265 /**
8266  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8267  * @vsi: VSI to be configured
8268  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8269  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8270  */
8271 static int
8272 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8273 {
8274 	int err;
8275 
8276 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8277 	if (err)
8278 		return err;
8279 
8280 	return ice_set_max_bw_limit(vsi, max_tx_rate);
8281 }
8282 
8283 /**
8284  * ice_create_q_channel - function to create channel
8285  * @vsi: VSI to be configured
8286  * @ch: ptr to channel (it contains channel specific params)
8287  *
8288  * This function creates channel (VSI) using num_queues specified by user,
8289  * reconfigs RSS if needed.
8290  */
8291 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8292 {
8293 	struct ice_pf *pf = vsi->back;
8294 	struct device *dev;
8295 
8296 	if (!ch)
8297 		return -EINVAL;
8298 
8299 	dev = ice_pf_to_dev(pf);
8300 	if (!ch->num_txq || !ch->num_rxq) {
8301 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8302 		return -EINVAL;
8303 	}
8304 
8305 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8306 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8307 			vsi->cnt_q_avail, ch->num_txq);
8308 		return -EINVAL;
8309 	}
8310 
8311 	if (!ice_setup_channel(pf, vsi, ch)) {
8312 		dev_info(dev, "Failed to setup channel\n");
8313 		return -EINVAL;
8314 	}
8315 	/* configure BW rate limit */
8316 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8317 		int ret;
8318 
8319 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8320 				       ch->min_tx_rate);
8321 		if (ret)
8322 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8323 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8324 		else
8325 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8326 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8327 	}
8328 
8329 	vsi->cnt_q_avail -= ch->num_txq;
8330 
8331 	return 0;
8332 }
8333 
8334 /**
8335  * ice_rem_all_chnl_fltrs - removes all channel filters
8336  * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8337  *
8338  * Remove all advanced switch filters only if they are channel specific
8339  * tc-flower based filter
8340  */
8341 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8342 {
8343 	struct ice_tc_flower_fltr *fltr;
8344 	struct hlist_node *node;
8345 
8346 	/* to remove all channel filters, iterate an ordered list of filters */
8347 	hlist_for_each_entry_safe(fltr, node,
8348 				  &pf->tc_flower_fltr_list,
8349 				  tc_flower_node) {
8350 		struct ice_rule_query_data rule;
8351 		int status;
8352 
8353 		/* for now process only channel specific filters */
8354 		if (!ice_is_chnl_fltr(fltr))
8355 			continue;
8356 
8357 		rule.rid = fltr->rid;
8358 		rule.rule_id = fltr->rule_id;
8359 		rule.vsi_handle = fltr->dest_vsi_handle;
8360 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8361 		if (status) {
8362 			if (status == -ENOENT)
8363 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8364 					rule.rule_id);
8365 			else
8366 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8367 					status);
8368 		} else if (fltr->dest_vsi) {
8369 			/* update advanced switch filter count */
8370 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8371 				u32 flags = fltr->flags;
8372 
8373 				fltr->dest_vsi->num_chnl_fltr--;
8374 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8375 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8376 					pf->num_dmac_chnl_fltrs--;
8377 			}
8378 		}
8379 
8380 		hlist_del(&fltr->tc_flower_node);
8381 		kfree(fltr);
8382 	}
8383 }
8384 
8385 /**
8386  * ice_remove_q_channels - Remove queue channels for the TCs
8387  * @vsi: VSI to be configured
8388  * @rem_fltr: delete advanced switch filter or not
8389  *
8390  * Remove queue channels for the TCs
8391  */
8392 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8393 {
8394 	struct ice_channel *ch, *ch_tmp;
8395 	struct ice_pf *pf = vsi->back;
8396 	int i;
8397 
8398 	/* remove all tc-flower based filter if they are channel filters only */
8399 	if (rem_fltr)
8400 		ice_rem_all_chnl_fltrs(pf);
8401 
8402 	/* remove ntuple filters since queue configuration is being changed */
8403 	if  (vsi->netdev->features & NETIF_F_NTUPLE) {
8404 		struct ice_hw *hw = &pf->hw;
8405 
8406 		mutex_lock(&hw->fdir_fltr_lock);
8407 		ice_fdir_del_all_fltrs(vsi);
8408 		mutex_unlock(&hw->fdir_fltr_lock);
8409 	}
8410 
8411 	/* perform cleanup for channels if they exist */
8412 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8413 		struct ice_vsi *ch_vsi;
8414 
8415 		list_del(&ch->list);
8416 		ch_vsi = ch->ch_vsi;
8417 		if (!ch_vsi) {
8418 			kfree(ch);
8419 			continue;
8420 		}
8421 
8422 		/* Reset queue contexts */
8423 		for (i = 0; i < ch->num_rxq; i++) {
8424 			struct ice_tx_ring *tx_ring;
8425 			struct ice_rx_ring *rx_ring;
8426 
8427 			tx_ring = vsi->tx_rings[ch->base_q + i];
8428 			rx_ring = vsi->rx_rings[ch->base_q + i];
8429 			if (tx_ring) {
8430 				tx_ring->ch = NULL;
8431 				if (tx_ring->q_vector)
8432 					tx_ring->q_vector->ch = NULL;
8433 			}
8434 			if (rx_ring) {
8435 				rx_ring->ch = NULL;
8436 				if (rx_ring->q_vector)
8437 					rx_ring->q_vector->ch = NULL;
8438 			}
8439 		}
8440 
8441 		/* Release FD resources for the channel VSI */
8442 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8443 
8444 		/* clear the VSI from scheduler tree */
8445 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8446 
8447 		/* Delete VSI from FW, PF and HW VSI arrays */
8448 		ice_vsi_delete(ch->ch_vsi);
8449 
8450 		/* free the channel */
8451 		kfree(ch);
8452 	}
8453 
8454 	/* clear the channel VSI map which is stored in main VSI */
8455 	ice_for_each_chnl_tc(i)
8456 		vsi->tc_map_vsi[i] = NULL;
8457 
8458 	/* reset main VSI's all TC information */
8459 	vsi->all_enatc = 0;
8460 	vsi->all_numtc = 0;
8461 }
8462 
8463 /**
8464  * ice_rebuild_channels - rebuild channel
8465  * @pf: ptr to PF
8466  *
8467  * Recreate channel VSIs and replay filters
8468  */
8469 static int ice_rebuild_channels(struct ice_pf *pf)
8470 {
8471 	struct device *dev = ice_pf_to_dev(pf);
8472 	struct ice_vsi *main_vsi;
8473 	bool rem_adv_fltr = true;
8474 	struct ice_channel *ch;
8475 	struct ice_vsi *vsi;
8476 	int tc_idx = 1;
8477 	int i, err;
8478 
8479 	main_vsi = ice_get_main_vsi(pf);
8480 	if (!main_vsi)
8481 		return 0;
8482 
8483 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8484 	    main_vsi->old_numtc == 1)
8485 		return 0; /* nothing to be done */
8486 
8487 	/* reconfigure main VSI based on old value of TC and cached values
8488 	 * for MQPRIO opts
8489 	 */
8490 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8491 	if (err) {
8492 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8493 			main_vsi->old_ena_tc, main_vsi->vsi_num);
8494 		return err;
8495 	}
8496 
8497 	/* rebuild ADQ VSIs */
8498 	ice_for_each_vsi(pf, i) {
8499 		enum ice_vsi_type type;
8500 
8501 		vsi = pf->vsi[i];
8502 		if (!vsi || vsi->type != ICE_VSI_CHNL)
8503 			continue;
8504 
8505 		type = vsi->type;
8506 
8507 		/* rebuild ADQ VSI */
8508 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8509 		if (err) {
8510 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8511 				ice_vsi_type_str(type), vsi->idx, err);
8512 			goto cleanup;
8513 		}
8514 
8515 		/* Re-map HW VSI number, using VSI handle that has been
8516 		 * previously validated in ice_replay_vsi() call above
8517 		 */
8518 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8519 
8520 		/* replay filters for the VSI */
8521 		err = ice_replay_vsi(&pf->hw, vsi->idx);
8522 		if (err) {
8523 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8524 				ice_vsi_type_str(type), err, vsi->idx);
8525 			rem_adv_fltr = false;
8526 			goto cleanup;
8527 		}
8528 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8529 			 ice_vsi_type_str(type), vsi->idx);
8530 
8531 		/* store ADQ VSI at correct TC index in main VSI's
8532 		 * map of TC to VSI
8533 		 */
8534 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
8535 	}
8536 
8537 	/* ADQ VSI(s) has been rebuilt successfully, so setup
8538 	 * channel for main VSI's Tx and Rx rings
8539 	 */
8540 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
8541 		struct ice_vsi *ch_vsi;
8542 
8543 		ch_vsi = ch->ch_vsi;
8544 		if (!ch_vsi)
8545 			continue;
8546 
8547 		/* reconfig channel resources */
8548 		ice_cfg_chnl_all_res(main_vsi, ch);
8549 
8550 		/* replay BW rate limit if it is non-zero */
8551 		if (!ch->max_tx_rate && !ch->min_tx_rate)
8552 			continue;
8553 
8554 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8555 				       ch->min_tx_rate);
8556 		if (err)
8557 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8558 				err, ch->max_tx_rate, ch->min_tx_rate,
8559 				ch_vsi->vsi_num);
8560 		else
8561 			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8562 				ch->max_tx_rate, ch->min_tx_rate,
8563 				ch_vsi->vsi_num);
8564 	}
8565 
8566 	/* reconfig RSS for main VSI */
8567 	if (main_vsi->ch_rss_size)
8568 		ice_vsi_cfg_rss_lut_key(main_vsi);
8569 
8570 	return 0;
8571 
8572 cleanup:
8573 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
8574 	return err;
8575 }
8576 
8577 /**
8578  * ice_create_q_channels - Add queue channel for the given TCs
8579  * @vsi: VSI to be configured
8580  *
8581  * Configures queue channel mapping to the given TCs
8582  */
8583 static int ice_create_q_channels(struct ice_vsi *vsi)
8584 {
8585 	struct ice_pf *pf = vsi->back;
8586 	struct ice_channel *ch;
8587 	int ret = 0, i;
8588 
8589 	ice_for_each_chnl_tc(i) {
8590 		if (!(vsi->all_enatc & BIT(i)))
8591 			continue;
8592 
8593 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8594 		if (!ch) {
8595 			ret = -ENOMEM;
8596 			goto err_free;
8597 		}
8598 		INIT_LIST_HEAD(&ch->list);
8599 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8600 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8601 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8602 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8603 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8604 
8605 		/* convert to Kbits/s */
8606 		if (ch->max_tx_rate)
8607 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
8608 						  ICE_BW_KBPS_DIVISOR);
8609 		if (ch->min_tx_rate)
8610 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
8611 						  ICE_BW_KBPS_DIVISOR);
8612 
8613 		ret = ice_create_q_channel(vsi, ch);
8614 		if (ret) {
8615 			dev_err(ice_pf_to_dev(pf),
8616 				"failed creating channel TC:%d\n", i);
8617 			kfree(ch);
8618 			goto err_free;
8619 		}
8620 		list_add_tail(&ch->list, &vsi->ch_list);
8621 		vsi->tc_map_vsi[i] = ch->ch_vsi;
8622 		dev_dbg(ice_pf_to_dev(pf),
8623 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
8624 	}
8625 	return 0;
8626 
8627 err_free:
8628 	ice_remove_q_channels(vsi, false);
8629 
8630 	return ret;
8631 }
8632 
8633 /**
8634  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8635  * @netdev: net device to configure
8636  * @type_data: TC offload data
8637  */
8638 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8639 {
8640 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8641 	struct ice_netdev_priv *np = netdev_priv(netdev);
8642 	struct ice_vsi *vsi = np->vsi;
8643 	struct ice_pf *pf = vsi->back;
8644 	u16 mode, ena_tc_qdisc = 0;
8645 	int cur_txq, cur_rxq;
8646 	u8 hw = 0, num_tcf;
8647 	struct device *dev;
8648 	int ret, i;
8649 
8650 	dev = ice_pf_to_dev(pf);
8651 	num_tcf = mqprio_qopt->qopt.num_tc;
8652 	hw = mqprio_qopt->qopt.hw;
8653 	mode = mqprio_qopt->mode;
8654 	if (!hw) {
8655 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8656 		vsi->ch_rss_size = 0;
8657 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8658 		goto config_tcf;
8659 	}
8660 
8661 	/* Generate queue region map for number of TCF requested */
8662 	for (i = 0; i < num_tcf; i++)
8663 		ena_tc_qdisc |= BIT(i);
8664 
8665 	switch (mode) {
8666 	case TC_MQPRIO_MODE_CHANNEL:
8667 
8668 		if (pf->hw.port_info->is_custom_tx_enabled) {
8669 			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8670 			return -EBUSY;
8671 		}
8672 		ice_tear_down_devlink_rate_tree(pf);
8673 
8674 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8675 		if (ret) {
8676 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8677 				   ret);
8678 			return ret;
8679 		}
8680 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8681 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8682 		/* don't assume state of hw_tc_offload during driver load
8683 		 * and set the flag for TC flower filter if hw_tc_offload
8684 		 * already ON
8685 		 */
8686 		if (vsi->netdev->features & NETIF_F_HW_TC)
8687 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8688 		break;
8689 	default:
8690 		return -EINVAL;
8691 	}
8692 
8693 config_tcf:
8694 
8695 	/* Requesting same TCF configuration as already enabled */
8696 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8697 	    mode != TC_MQPRIO_MODE_CHANNEL)
8698 		return 0;
8699 
8700 	/* Pause VSI queues */
8701 	ice_dis_vsi(vsi, true);
8702 
8703 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8704 		ice_remove_q_channels(vsi, true);
8705 
8706 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8707 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8708 				     num_online_cpus());
8709 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8710 				     num_online_cpus());
8711 	} else {
8712 		/* logic to rebuild VSI, same like ethtool -L */
8713 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8714 
8715 		for (i = 0; i < num_tcf; i++) {
8716 			if (!(ena_tc_qdisc & BIT(i)))
8717 				continue;
8718 
8719 			offset = vsi->mqprio_qopt.qopt.offset[i];
8720 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8721 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8722 		}
8723 		vsi->req_txq = offset + qcount_tx;
8724 		vsi->req_rxq = offset + qcount_rx;
8725 
8726 		/* store away original rss_size info, so that it gets reused
8727 		 * form ice_vsi_rebuild during tc-qdisc delete stage - to
8728 		 * determine, what should be the rss_sizefor main VSI
8729 		 */
8730 		vsi->orig_rss_size = vsi->rss_size;
8731 	}
8732 
8733 	/* save current values of Tx and Rx queues before calling VSI rebuild
8734 	 * for fallback option
8735 	 */
8736 	cur_txq = vsi->num_txq;
8737 	cur_rxq = vsi->num_rxq;
8738 
8739 	/* proceed with rebuild main VSI using correct number of queues */
8740 	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8741 	if (ret) {
8742 		/* fallback to current number of queues */
8743 		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8744 		vsi->req_txq = cur_txq;
8745 		vsi->req_rxq = cur_rxq;
8746 		clear_bit(ICE_RESET_FAILED, pf->state);
8747 		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8748 			dev_err(dev, "Rebuild of main VSI failed again\n");
8749 			return ret;
8750 		}
8751 	}
8752 
8753 	vsi->all_numtc = num_tcf;
8754 	vsi->all_enatc = ena_tc_qdisc;
8755 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8756 	if (ret) {
8757 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8758 			   vsi->vsi_num);
8759 		goto exit;
8760 	}
8761 
8762 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8763 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8764 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8765 
8766 		/* set TC0 rate limit if specified */
8767 		if (max_tx_rate || min_tx_rate) {
8768 			/* convert to Kbits/s */
8769 			if (max_tx_rate)
8770 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8771 			if (min_tx_rate)
8772 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8773 
8774 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8775 			if (!ret) {
8776 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8777 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8778 			} else {
8779 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8780 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8781 				goto exit;
8782 			}
8783 		}
8784 		ret = ice_create_q_channels(vsi);
8785 		if (ret) {
8786 			netdev_err(netdev, "failed configuring queue channels\n");
8787 			goto exit;
8788 		} else {
8789 			netdev_dbg(netdev, "successfully configured channels\n");
8790 		}
8791 	}
8792 
8793 	if (vsi->ch_rss_size)
8794 		ice_vsi_cfg_rss_lut_key(vsi);
8795 
8796 exit:
8797 	/* if error, reset the all_numtc and all_enatc */
8798 	if (ret) {
8799 		vsi->all_numtc = 0;
8800 		vsi->all_enatc = 0;
8801 	}
8802 	/* resume VSI */
8803 	ice_ena_vsi(vsi, true);
8804 
8805 	return ret;
8806 }
8807 
8808 static LIST_HEAD(ice_block_cb_list);
8809 
8810 static int
8811 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8812 	     void *type_data)
8813 {
8814 	struct ice_netdev_priv *np = netdev_priv(netdev);
8815 	struct ice_pf *pf = np->vsi->back;
8816 	int err;
8817 
8818 	switch (type) {
8819 	case TC_SETUP_BLOCK:
8820 		return flow_block_cb_setup_simple(type_data,
8821 						  &ice_block_cb_list,
8822 						  ice_setup_tc_block_cb,
8823 						  np, np, true);
8824 	case TC_SETUP_QDISC_MQPRIO:
8825 		/* setup traffic classifier for receive side */
8826 		mutex_lock(&pf->tc_mutex);
8827 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8828 		mutex_unlock(&pf->tc_mutex);
8829 		return err;
8830 	default:
8831 		return -EOPNOTSUPP;
8832 	}
8833 	return -EOPNOTSUPP;
8834 }
8835 
8836 static struct ice_indr_block_priv *
8837 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8838 			   struct net_device *netdev)
8839 {
8840 	struct ice_indr_block_priv *cb_priv;
8841 
8842 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8843 		if (!cb_priv->netdev)
8844 			return NULL;
8845 		if (cb_priv->netdev == netdev)
8846 			return cb_priv;
8847 	}
8848 	return NULL;
8849 }
8850 
8851 static int
8852 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8853 			void *indr_priv)
8854 {
8855 	struct ice_indr_block_priv *priv = indr_priv;
8856 	struct ice_netdev_priv *np = priv->np;
8857 
8858 	switch (type) {
8859 	case TC_SETUP_CLSFLOWER:
8860 		return ice_setup_tc_cls_flower(np, priv->netdev,
8861 					       (struct flow_cls_offload *)
8862 					       type_data);
8863 	default:
8864 		return -EOPNOTSUPP;
8865 	}
8866 }
8867 
8868 static int
8869 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8870 			struct ice_netdev_priv *np,
8871 			struct flow_block_offload *f, void *data,
8872 			void (*cleanup)(struct flow_block_cb *block_cb))
8873 {
8874 	struct ice_indr_block_priv *indr_priv;
8875 	struct flow_block_cb *block_cb;
8876 
8877 	if (!ice_is_tunnel_supported(netdev) &&
8878 	    !(is_vlan_dev(netdev) &&
8879 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
8880 		return -EOPNOTSUPP;
8881 
8882 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8883 		return -EOPNOTSUPP;
8884 
8885 	switch (f->command) {
8886 	case FLOW_BLOCK_BIND:
8887 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
8888 		if (indr_priv)
8889 			return -EEXIST;
8890 
8891 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
8892 		if (!indr_priv)
8893 			return -ENOMEM;
8894 
8895 		indr_priv->netdev = netdev;
8896 		indr_priv->np = np;
8897 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
8898 
8899 		block_cb =
8900 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
8901 						 indr_priv, indr_priv,
8902 						 ice_rep_indr_tc_block_unbind,
8903 						 f, netdev, sch, data, np,
8904 						 cleanup);
8905 
8906 		if (IS_ERR(block_cb)) {
8907 			list_del(&indr_priv->list);
8908 			kfree(indr_priv);
8909 			return PTR_ERR(block_cb);
8910 		}
8911 		flow_block_cb_add(block_cb, f);
8912 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
8913 		break;
8914 	case FLOW_BLOCK_UNBIND:
8915 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
8916 		if (!indr_priv)
8917 			return -ENOENT;
8918 
8919 		block_cb = flow_block_cb_lookup(f->block,
8920 						ice_indr_setup_block_cb,
8921 						indr_priv);
8922 		if (!block_cb)
8923 			return -ENOENT;
8924 
8925 		flow_indr_block_cb_remove(block_cb, f);
8926 
8927 		list_del(&block_cb->driver_list);
8928 		break;
8929 	default:
8930 		return -EOPNOTSUPP;
8931 	}
8932 	return 0;
8933 }
8934 
8935 static int
8936 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
8937 		     void *cb_priv, enum tc_setup_type type, void *type_data,
8938 		     void *data,
8939 		     void (*cleanup)(struct flow_block_cb *block_cb))
8940 {
8941 	switch (type) {
8942 	case TC_SETUP_BLOCK:
8943 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
8944 					       data, cleanup);
8945 
8946 	default:
8947 		return -EOPNOTSUPP;
8948 	}
8949 }
8950 
8951 /**
8952  * ice_open - Called when a network interface becomes active
8953  * @netdev: network interface device structure
8954  *
8955  * The open entry point is called when a network interface is made
8956  * active by the system (IFF_UP). At this point all resources needed
8957  * for transmit and receive operations are allocated, the interrupt
8958  * handler is registered with the OS, the netdev watchdog is enabled,
8959  * and the stack is notified that the interface is ready.
8960  *
8961  * Returns 0 on success, negative value on failure
8962  */
8963 int ice_open(struct net_device *netdev)
8964 {
8965 	struct ice_netdev_priv *np = netdev_priv(netdev);
8966 	struct ice_pf *pf = np->vsi->back;
8967 
8968 	if (ice_is_reset_in_progress(pf->state)) {
8969 		netdev_err(netdev, "can't open net device while reset is in progress");
8970 		return -EBUSY;
8971 	}
8972 
8973 	return ice_open_internal(netdev);
8974 }
8975 
8976 /**
8977  * ice_open_internal - Called when a network interface becomes active
8978  * @netdev: network interface device structure
8979  *
8980  * Internal ice_open implementation. Should not be used directly except for ice_open and reset
8981  * handling routine
8982  *
8983  * Returns 0 on success, negative value on failure
8984  */
8985 int ice_open_internal(struct net_device *netdev)
8986 {
8987 	struct ice_netdev_priv *np = netdev_priv(netdev);
8988 	struct ice_vsi *vsi = np->vsi;
8989 	struct ice_pf *pf = vsi->back;
8990 	struct ice_port_info *pi;
8991 	int err;
8992 
8993 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
8994 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
8995 		return -EIO;
8996 	}
8997 
8998 	netif_carrier_off(netdev);
8999 
9000 	pi = vsi->port_info;
9001 	err = ice_update_link_info(pi);
9002 	if (err) {
9003 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9004 		return err;
9005 	}
9006 
9007 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9008 
9009 	/* Set PHY if there is media, otherwise, turn off PHY */
9010 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9011 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9012 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9013 			err = ice_init_phy_user_cfg(pi);
9014 			if (err) {
9015 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9016 					   err);
9017 				return err;
9018 			}
9019 		}
9020 
9021 		err = ice_configure_phy(vsi);
9022 		if (err) {
9023 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
9024 				   err);
9025 			return err;
9026 		}
9027 	} else {
9028 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9029 		ice_set_link(vsi, false);
9030 	}
9031 
9032 	err = ice_vsi_open(vsi);
9033 	if (err)
9034 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9035 			   vsi->vsi_num, vsi->vsw->sw_id);
9036 
9037 	/* Update existing tunnels information */
9038 	udp_tunnel_get_rx_info(netdev);
9039 
9040 	return err;
9041 }
9042 
9043 /**
9044  * ice_stop - Disables a network interface
9045  * @netdev: network interface device structure
9046  *
9047  * The stop entry point is called when an interface is de-activated by the OS,
9048  * and the netdevice enters the DOWN state. The hardware is still under the
9049  * driver's control, but the netdev interface is disabled.
9050  *
9051  * Returns success only - not allowed to fail
9052  */
9053 int ice_stop(struct net_device *netdev)
9054 {
9055 	struct ice_netdev_priv *np = netdev_priv(netdev);
9056 	struct ice_vsi *vsi = np->vsi;
9057 	struct ice_pf *pf = vsi->back;
9058 
9059 	if (ice_is_reset_in_progress(pf->state)) {
9060 		netdev_err(netdev, "can't stop net device while reset is in progress");
9061 		return -EBUSY;
9062 	}
9063 
9064 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9065 		int link_err = ice_force_phys_link_state(vsi, false);
9066 
9067 		if (link_err) {
9068 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9069 				   vsi->vsi_num, link_err);
9070 			return -EIO;
9071 		}
9072 	}
9073 
9074 	ice_vsi_close(vsi);
9075 
9076 	return 0;
9077 }
9078 
9079 /**
9080  * ice_features_check - Validate encapsulated packet conforms to limits
9081  * @skb: skb buffer
9082  * @netdev: This port's netdev
9083  * @features: Offload features that the stack believes apply
9084  */
9085 static netdev_features_t
9086 ice_features_check(struct sk_buff *skb,
9087 		   struct net_device __always_unused *netdev,
9088 		   netdev_features_t features)
9089 {
9090 	bool gso = skb_is_gso(skb);
9091 	size_t len;
9092 
9093 	/* No point in doing any of this if neither checksum nor GSO are
9094 	 * being requested for this frame. We can rule out both by just
9095 	 * checking for CHECKSUM_PARTIAL
9096 	 */
9097 	if (skb->ip_summed != CHECKSUM_PARTIAL)
9098 		return features;
9099 
9100 	/* We cannot support GSO if the MSS is going to be less than
9101 	 * 64 bytes. If it is then we need to drop support for GSO.
9102 	 */
9103 	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9104 		features &= ~NETIF_F_GSO_MASK;
9105 
9106 	len = skb_network_offset(skb);
9107 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9108 		goto out_rm_features;
9109 
9110 	len = skb_network_header_len(skb);
9111 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9112 		goto out_rm_features;
9113 
9114 	if (skb->encapsulation) {
9115 		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
9116 		 * the case of IPIP frames, the transport header pointer is
9117 		 * after the inner header! So check to make sure that this
9118 		 * is a GRE or UDP_TUNNEL frame before doing that math.
9119 		 */
9120 		if (gso && (skb_shinfo(skb)->gso_type &
9121 			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9122 			len = skb_inner_network_header(skb) -
9123 			      skb_transport_header(skb);
9124 			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9125 				goto out_rm_features;
9126 		}
9127 
9128 		len = skb_inner_network_header_len(skb);
9129 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9130 			goto out_rm_features;
9131 	}
9132 
9133 	return features;
9134 out_rm_features:
9135 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9136 }
9137 
9138 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9139 	.ndo_open = ice_open,
9140 	.ndo_stop = ice_stop,
9141 	.ndo_start_xmit = ice_start_xmit,
9142 	.ndo_set_mac_address = ice_set_mac_address,
9143 	.ndo_validate_addr = eth_validate_addr,
9144 	.ndo_change_mtu = ice_change_mtu,
9145 	.ndo_get_stats64 = ice_get_stats64,
9146 	.ndo_tx_timeout = ice_tx_timeout,
9147 	.ndo_bpf = ice_xdp_safe_mode,
9148 };
9149 
9150 static const struct net_device_ops ice_netdev_ops = {
9151 	.ndo_open = ice_open,
9152 	.ndo_stop = ice_stop,
9153 	.ndo_start_xmit = ice_start_xmit,
9154 	.ndo_select_queue = ice_select_queue,
9155 	.ndo_features_check = ice_features_check,
9156 	.ndo_fix_features = ice_fix_features,
9157 	.ndo_set_rx_mode = ice_set_rx_mode,
9158 	.ndo_set_mac_address = ice_set_mac_address,
9159 	.ndo_validate_addr = eth_validate_addr,
9160 	.ndo_change_mtu = ice_change_mtu,
9161 	.ndo_get_stats64 = ice_get_stats64,
9162 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9163 	.ndo_eth_ioctl = ice_eth_ioctl,
9164 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9165 	.ndo_set_vf_mac = ice_set_vf_mac,
9166 	.ndo_get_vf_config = ice_get_vf_cfg,
9167 	.ndo_set_vf_trust = ice_set_vf_trust,
9168 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
9169 	.ndo_set_vf_link_state = ice_set_vf_link_state,
9170 	.ndo_get_vf_stats = ice_get_vf_stats,
9171 	.ndo_set_vf_rate = ice_set_vf_bw,
9172 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9173 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9174 	.ndo_setup_tc = ice_setup_tc,
9175 	.ndo_set_features = ice_set_features,
9176 	.ndo_bridge_getlink = ice_bridge_getlink,
9177 	.ndo_bridge_setlink = ice_bridge_setlink,
9178 	.ndo_fdb_add = ice_fdb_add,
9179 	.ndo_fdb_del = ice_fdb_del,
9180 #ifdef CONFIG_RFS_ACCEL
9181 	.ndo_rx_flow_steer = ice_rx_flow_steer,
9182 #endif
9183 	.ndo_tx_timeout = ice_tx_timeout,
9184 	.ndo_bpf = ice_xdp,
9185 	.ndo_xdp_xmit = ice_xdp_xmit,
9186 	.ndo_xsk_wakeup = ice_xsk_wakeup,
9187 };
9188