1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
17  * ice tracepoint functions. This must be done exactly once across the
18  * ice driver.
19  */
20 #define CREATE_TRACE_POINTS
21 #include "ice_trace.h"
22 #include "ice_eswitch.h"
23 #include "ice_tc_lib.h"
24 #include "ice_vsi_vlan_ops.h"
25 
26 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
27 static const char ice_driver_string[] = DRV_SUMMARY;
28 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
29 
30 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
31 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
32 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
33 
34 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
35 MODULE_DESCRIPTION(DRV_SUMMARY);
36 MODULE_LICENSE("GPL v2");
37 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
38 
39 static int debug = -1;
40 module_param(debug, int, 0644);
41 #ifndef CONFIG_DYNAMIC_DEBUG
42 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
43 #else
44 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
45 #endif /* !CONFIG_DYNAMIC_DEBUG */
46 
47 static DEFINE_IDA(ice_aux_ida);
48 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
49 EXPORT_SYMBOL(ice_xdp_locking_key);
50 
51 /**
52  * ice_hw_to_dev - Get device pointer from the hardware structure
53  * @hw: pointer to the device HW structure
54  *
55  * Used to access the device pointer from compilation units which can't easily
56  * include the definition of struct ice_pf without leading to circular header
57  * dependencies.
58  */
59 struct device *ice_hw_to_dev(struct ice_hw *hw)
60 {
61 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
62 
63 	return &pf->pdev->dev;
64 }
65 
66 static struct workqueue_struct *ice_wq;
67 static const struct net_device_ops ice_netdev_safe_mode_ops;
68 static const struct net_device_ops ice_netdev_ops;
69 
70 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
71 
72 static void ice_vsi_release_all(struct ice_pf *pf);
73 
74 static int ice_rebuild_channels(struct ice_pf *pf);
75 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
76 
77 static int
78 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
79 		     void *cb_priv, enum tc_setup_type type, void *type_data,
80 		     void *data,
81 		     void (*cleanup)(struct flow_block_cb *block_cb));
82 
83 bool netif_is_ice(struct net_device *dev)
84 {
85 	return dev && (dev->netdev_ops == &ice_netdev_ops);
86 }
87 
88 /**
89  * ice_get_tx_pending - returns number of Tx descriptors not processed
90  * @ring: the ring of descriptors
91  */
92 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
93 {
94 	u16 head, tail;
95 
96 	head = ring->next_to_clean;
97 	tail = ring->next_to_use;
98 
99 	if (head != tail)
100 		return (head < tail) ?
101 			tail - head : (tail + ring->count - head);
102 	return 0;
103 }
104 
105 /**
106  * ice_check_for_hang_subtask - check for and recover hung queues
107  * @pf: pointer to PF struct
108  */
109 static void ice_check_for_hang_subtask(struct ice_pf *pf)
110 {
111 	struct ice_vsi *vsi = NULL;
112 	struct ice_hw *hw;
113 	unsigned int i;
114 	int packets;
115 	u32 v;
116 
117 	ice_for_each_vsi(pf, v)
118 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
119 			vsi = pf->vsi[v];
120 			break;
121 		}
122 
123 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
124 		return;
125 
126 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
127 		return;
128 
129 	hw = &vsi->back->hw;
130 
131 	ice_for_each_txq(vsi, i) {
132 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
133 
134 		if (!tx_ring)
135 			continue;
136 		if (ice_ring_ch_enabled(tx_ring))
137 			continue;
138 
139 		if (tx_ring->desc) {
140 			/* If packet counter has not changed the queue is
141 			 * likely stalled, so force an interrupt for this
142 			 * queue.
143 			 *
144 			 * prev_pkt would be negative if there was no
145 			 * pending work.
146 			 */
147 			packets = tx_ring->stats.pkts & INT_MAX;
148 			if (tx_ring->tx_stats.prev_pkt == packets) {
149 				/* Trigger sw interrupt to revive the queue */
150 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
151 				continue;
152 			}
153 
154 			/* Memory barrier between read of packet count and call
155 			 * to ice_get_tx_pending()
156 			 */
157 			smp_rmb();
158 			tx_ring->tx_stats.prev_pkt =
159 			    ice_get_tx_pending(tx_ring) ? packets : -1;
160 		}
161 	}
162 }
163 
164 /**
165  * ice_init_mac_fltr - Set initial MAC filters
166  * @pf: board private structure
167  *
168  * Set initial set of MAC filters for PF VSI; configure filters for permanent
169  * address and broadcast address. If an error is encountered, netdevice will be
170  * unregistered.
171  */
172 static int ice_init_mac_fltr(struct ice_pf *pf)
173 {
174 	struct ice_vsi *vsi;
175 	u8 *perm_addr;
176 
177 	vsi = ice_get_main_vsi(pf);
178 	if (!vsi)
179 		return -EINVAL;
180 
181 	perm_addr = vsi->port_info->mac.perm_addr;
182 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
183 }
184 
185 /**
186  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
187  * @netdev: the net device on which the sync is happening
188  * @addr: MAC address to sync
189  *
190  * This is a callback function which is called by the in kernel device sync
191  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
192  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
193  * MAC filters from the hardware.
194  */
195 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
196 {
197 	struct ice_netdev_priv *np = netdev_priv(netdev);
198 	struct ice_vsi *vsi = np->vsi;
199 
200 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
201 				     ICE_FWD_TO_VSI))
202 		return -EINVAL;
203 
204 	return 0;
205 }
206 
207 /**
208  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
209  * @netdev: the net device on which the unsync is happening
210  * @addr: MAC address to unsync
211  *
212  * This is a callback function which is called by the in kernel device unsync
213  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
214  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
215  * delete the MAC filters from the hardware.
216  */
217 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
218 {
219 	struct ice_netdev_priv *np = netdev_priv(netdev);
220 	struct ice_vsi *vsi = np->vsi;
221 
222 	/* Under some circumstances, we might receive a request to delete our
223 	 * own device address from our uc list. Because we store the device
224 	 * address in the VSI's MAC filter list, we need to ignore such
225 	 * requests and not delete our device address from this list.
226 	 */
227 	if (ether_addr_equal(addr, netdev->dev_addr))
228 		return 0;
229 
230 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
231 				     ICE_FWD_TO_VSI))
232 		return -EINVAL;
233 
234 	return 0;
235 }
236 
237 /**
238  * ice_vsi_fltr_changed - check if filter state changed
239  * @vsi: VSI to be checked
240  *
241  * returns true if filter state has changed, false otherwise.
242  */
243 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
244 {
245 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
246 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
247 }
248 
249 /**
250  * ice_set_promisc - Enable promiscuous mode for a given PF
251  * @vsi: the VSI being configured
252  * @promisc_m: mask of promiscuous config bits
253  *
254  */
255 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
256 {
257 	int status;
258 
259 	if (vsi->type != ICE_VSI_PF)
260 		return 0;
261 
262 	if (ice_vsi_has_non_zero_vlans(vsi)) {
263 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
264 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
265 						       promisc_m);
266 	} else {
267 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
268 						  promisc_m, 0);
269 	}
270 	if (status && status != -EEXIST)
271 		return status;
272 
273 	return 0;
274 }
275 
276 /**
277  * ice_clear_promisc - Disable promiscuous mode for a given PF
278  * @vsi: the VSI being configured
279  * @promisc_m: mask of promiscuous config bits
280  *
281  */
282 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
283 {
284 	int status;
285 
286 	if (vsi->type != ICE_VSI_PF)
287 		return 0;
288 
289 	if (ice_vsi_has_non_zero_vlans(vsi)) {
290 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
291 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
292 							 promisc_m);
293 	} else {
294 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
295 						    promisc_m, 0);
296 	}
297 
298 	return status;
299 }
300 
301 /**
302  * ice_get_devlink_port - Get devlink port from netdev
303  * @netdev: the netdevice structure
304  */
305 static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
306 {
307 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
308 
309 	if (!ice_is_switchdev_running(pf))
310 		return NULL;
311 
312 	return &pf->devlink_port;
313 }
314 
315 /**
316  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
317  * @vsi: ptr to the VSI
318  *
319  * Push any outstanding VSI filter changes through the AdminQ.
320  */
321 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
322 {
323 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
324 	struct device *dev = ice_pf_to_dev(vsi->back);
325 	struct net_device *netdev = vsi->netdev;
326 	bool promisc_forced_on = false;
327 	struct ice_pf *pf = vsi->back;
328 	struct ice_hw *hw = &pf->hw;
329 	u32 changed_flags = 0;
330 	int err;
331 
332 	if (!vsi->netdev)
333 		return -EINVAL;
334 
335 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
336 		usleep_range(1000, 2000);
337 
338 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
339 	vsi->current_netdev_flags = vsi->netdev->flags;
340 
341 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
342 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
343 
344 	if (ice_vsi_fltr_changed(vsi)) {
345 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
346 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
347 
348 		/* grab the netdev's addr_list_lock */
349 		netif_addr_lock_bh(netdev);
350 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
351 			      ice_add_mac_to_unsync_list);
352 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
353 			      ice_add_mac_to_unsync_list);
354 		/* our temp lists are populated. release lock */
355 		netif_addr_unlock_bh(netdev);
356 	}
357 
358 	/* Remove MAC addresses in the unsync list */
359 	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
360 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
361 	if (err) {
362 		netdev_err(netdev, "Failed to delete MAC filters\n");
363 		/* if we failed because of alloc failures, just bail */
364 		if (err == -ENOMEM)
365 			goto out;
366 	}
367 
368 	/* Add MAC addresses in the sync list */
369 	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
370 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
371 	/* If filter is added successfully or already exists, do not go into
372 	 * 'if' condition and report it as error. Instead continue processing
373 	 * rest of the function.
374 	 */
375 	if (err && err != -EEXIST) {
376 		netdev_err(netdev, "Failed to add MAC filters\n");
377 		/* If there is no more space for new umac filters, VSI
378 		 * should go into promiscuous mode. There should be some
379 		 * space reserved for promiscuous filters.
380 		 */
381 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
382 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
383 				      vsi->state)) {
384 			promisc_forced_on = true;
385 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
386 				    vsi->vsi_num);
387 		} else {
388 			goto out;
389 		}
390 	}
391 	err = 0;
392 	/* check for changes in promiscuous modes */
393 	if (changed_flags & IFF_ALLMULTI) {
394 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
395 			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
396 			if (err) {
397 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
398 				goto out_promisc;
399 			}
400 		} else {
401 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
402 			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
403 			if (err) {
404 				vsi->current_netdev_flags |= IFF_ALLMULTI;
405 				goto out_promisc;
406 			}
407 		}
408 	}
409 
410 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
411 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
412 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
413 		if (vsi->current_netdev_flags & IFF_PROMISC) {
414 			/* Apply Rx filter rule to get traffic from wire */
415 			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
416 				err = ice_set_dflt_vsi(vsi);
417 				if (err && err != -EEXIST) {
418 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
419 						   err, vsi->vsi_num);
420 					vsi->current_netdev_flags &=
421 						~IFF_PROMISC;
422 					goto out_promisc;
423 				}
424 				err = 0;
425 				vlan_ops->dis_rx_filtering(vsi);
426 			}
427 		} else {
428 			/* Clear Rx filter to remove traffic from wire */
429 			if (ice_is_vsi_dflt_vsi(vsi)) {
430 				err = ice_clear_dflt_vsi(vsi);
431 				if (err) {
432 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
433 						   err, vsi->vsi_num);
434 					vsi->current_netdev_flags |=
435 						IFF_PROMISC;
436 					goto out_promisc;
437 				}
438 				if (vsi->netdev->features &
439 				    NETIF_F_HW_VLAN_CTAG_FILTER)
440 					vlan_ops->ena_rx_filtering(vsi);
441 			}
442 		}
443 	}
444 	goto exit;
445 
446 out_promisc:
447 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
448 	goto exit;
449 out:
450 	/* if something went wrong then set the changed flag so we try again */
451 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
452 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
453 exit:
454 	clear_bit(ICE_CFG_BUSY, vsi->state);
455 	return err;
456 }
457 
458 /**
459  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
460  * @pf: board private structure
461  */
462 static void ice_sync_fltr_subtask(struct ice_pf *pf)
463 {
464 	int v;
465 
466 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
467 		return;
468 
469 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
470 
471 	ice_for_each_vsi(pf, v)
472 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
473 		    ice_vsi_sync_fltr(pf->vsi[v])) {
474 			/* come back and try again later */
475 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
476 			break;
477 		}
478 }
479 
480 /**
481  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
482  * @pf: the PF
483  * @locked: is the rtnl_lock already held
484  */
485 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
486 {
487 	int node;
488 	int v;
489 
490 	ice_for_each_vsi(pf, v)
491 		if (pf->vsi[v])
492 			ice_dis_vsi(pf->vsi[v], locked);
493 
494 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
495 		pf->pf_agg_node[node].num_vsis = 0;
496 
497 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
498 		pf->vf_agg_node[node].num_vsis = 0;
499 }
500 
501 /**
502  * ice_clear_sw_switch_recipes - clear switch recipes
503  * @pf: board private structure
504  *
505  * Mark switch recipes as not created in sw structures. There are cases where
506  * rules (especially advanced rules) need to be restored, either re-read from
507  * hardware or added again. For example after the reset. 'recp_created' flag
508  * prevents from doing that and need to be cleared upfront.
509  */
510 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
511 {
512 	struct ice_sw_recipe *recp;
513 	u8 i;
514 
515 	recp = pf->hw.switch_info->recp_list;
516 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
517 		recp[i].recp_created = false;
518 }
519 
520 /**
521  * ice_prepare_for_reset - prep for reset
522  * @pf: board private structure
523  * @reset_type: reset type requested
524  *
525  * Inform or close all dependent features in prep for reset.
526  */
527 static void
528 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
529 {
530 	struct ice_hw *hw = &pf->hw;
531 	struct ice_vsi *vsi;
532 	struct ice_vf *vf;
533 	unsigned int bkt;
534 
535 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
536 
537 	/* already prepared for reset */
538 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
539 		return;
540 
541 	ice_unplug_aux_dev(pf);
542 
543 	/* Notify VFs of impending reset */
544 	if (ice_check_sq_alive(hw, &hw->mailboxq))
545 		ice_vc_notify_reset(pf);
546 
547 	/* Disable VFs until reset is completed */
548 	mutex_lock(&pf->vfs.table_lock);
549 	ice_for_each_vf(pf, bkt, vf)
550 		ice_set_vf_state_qs_dis(vf);
551 	mutex_unlock(&pf->vfs.table_lock);
552 
553 	if (ice_is_eswitch_mode_switchdev(pf)) {
554 		if (reset_type != ICE_RESET_PFR)
555 			ice_clear_sw_switch_recipes(pf);
556 	}
557 
558 	/* release ADQ specific HW and SW resources */
559 	vsi = ice_get_main_vsi(pf);
560 	if (!vsi)
561 		goto skip;
562 
563 	/* to be on safe side, reset orig_rss_size so that normal flow
564 	 * of deciding rss_size can take precedence
565 	 */
566 	vsi->orig_rss_size = 0;
567 
568 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
569 		if (reset_type == ICE_RESET_PFR) {
570 			vsi->old_ena_tc = vsi->all_enatc;
571 			vsi->old_numtc = vsi->all_numtc;
572 		} else {
573 			ice_remove_q_channels(vsi, true);
574 
575 			/* for other reset type, do not support channel rebuild
576 			 * hence reset needed info
577 			 */
578 			vsi->old_ena_tc = 0;
579 			vsi->all_enatc = 0;
580 			vsi->old_numtc = 0;
581 			vsi->all_numtc = 0;
582 			vsi->req_txq = 0;
583 			vsi->req_rxq = 0;
584 			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
585 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
586 		}
587 	}
588 skip:
589 
590 	/* clear SW filtering DB */
591 	ice_clear_hw_tbls(hw);
592 	/* disable the VSIs and their queues that are not already DOWN */
593 	ice_pf_dis_all_vsi(pf, false);
594 
595 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
596 		ice_ptp_prepare_for_reset(pf);
597 
598 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
599 		ice_gnss_exit(pf);
600 
601 	if (hw->port_info)
602 		ice_sched_clear_port(hw->port_info);
603 
604 	ice_shutdown_all_ctrlq(hw);
605 
606 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
607 }
608 
609 /**
610  * ice_do_reset - Initiate one of many types of resets
611  * @pf: board private structure
612  * @reset_type: reset type requested before this function was called.
613  */
614 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
615 {
616 	struct device *dev = ice_pf_to_dev(pf);
617 	struct ice_hw *hw = &pf->hw;
618 
619 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
620 
621 	ice_prepare_for_reset(pf, reset_type);
622 
623 	/* trigger the reset */
624 	if (ice_reset(hw, reset_type)) {
625 		dev_err(dev, "reset %d failed\n", reset_type);
626 		set_bit(ICE_RESET_FAILED, pf->state);
627 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
628 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
629 		clear_bit(ICE_PFR_REQ, pf->state);
630 		clear_bit(ICE_CORER_REQ, pf->state);
631 		clear_bit(ICE_GLOBR_REQ, pf->state);
632 		wake_up(&pf->reset_wait_queue);
633 		return;
634 	}
635 
636 	/* PFR is a bit of a special case because it doesn't result in an OICR
637 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
638 	 * associated state bits.
639 	 */
640 	if (reset_type == ICE_RESET_PFR) {
641 		pf->pfr_count++;
642 		ice_rebuild(pf, reset_type);
643 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
644 		clear_bit(ICE_PFR_REQ, pf->state);
645 		wake_up(&pf->reset_wait_queue);
646 		ice_reset_all_vfs(pf);
647 	}
648 }
649 
650 /**
651  * ice_reset_subtask - Set up for resetting the device and driver
652  * @pf: board private structure
653  */
654 static void ice_reset_subtask(struct ice_pf *pf)
655 {
656 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
657 
658 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
659 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
660 	 * of reset is pending and sets bits in pf->state indicating the reset
661 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
662 	 * prepare for pending reset if not already (for PF software-initiated
663 	 * global resets the software should already be prepared for it as
664 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
665 	 * by firmware or software on other PFs, that bit is not set so prepare
666 	 * for the reset now), poll for reset done, rebuild and return.
667 	 */
668 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
669 		/* Perform the largest reset requested */
670 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
671 			reset_type = ICE_RESET_CORER;
672 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
673 			reset_type = ICE_RESET_GLOBR;
674 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
675 			reset_type = ICE_RESET_EMPR;
676 		/* return if no valid reset type requested */
677 		if (reset_type == ICE_RESET_INVAL)
678 			return;
679 		ice_prepare_for_reset(pf, reset_type);
680 
681 		/* make sure we are ready to rebuild */
682 		if (ice_check_reset(&pf->hw)) {
683 			set_bit(ICE_RESET_FAILED, pf->state);
684 		} else {
685 			/* done with reset. start rebuild */
686 			pf->hw.reset_ongoing = false;
687 			ice_rebuild(pf, reset_type);
688 			/* clear bit to resume normal operations, but
689 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
690 			 */
691 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
692 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
693 			clear_bit(ICE_PFR_REQ, pf->state);
694 			clear_bit(ICE_CORER_REQ, pf->state);
695 			clear_bit(ICE_GLOBR_REQ, pf->state);
696 			wake_up(&pf->reset_wait_queue);
697 			ice_reset_all_vfs(pf);
698 		}
699 
700 		return;
701 	}
702 
703 	/* No pending resets to finish processing. Check for new resets */
704 	if (test_bit(ICE_PFR_REQ, pf->state))
705 		reset_type = ICE_RESET_PFR;
706 	if (test_bit(ICE_CORER_REQ, pf->state))
707 		reset_type = ICE_RESET_CORER;
708 	if (test_bit(ICE_GLOBR_REQ, pf->state))
709 		reset_type = ICE_RESET_GLOBR;
710 	/* If no valid reset type requested just return */
711 	if (reset_type == ICE_RESET_INVAL)
712 		return;
713 
714 	/* reset if not already down or busy */
715 	if (!test_bit(ICE_DOWN, pf->state) &&
716 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
717 		ice_do_reset(pf, reset_type);
718 	}
719 }
720 
721 /**
722  * ice_print_topo_conflict - print topology conflict message
723  * @vsi: the VSI whose topology status is being checked
724  */
725 static void ice_print_topo_conflict(struct ice_vsi *vsi)
726 {
727 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
728 	case ICE_AQ_LINK_TOPO_CONFLICT:
729 	case ICE_AQ_LINK_MEDIA_CONFLICT:
730 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
731 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
732 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
733 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
734 		break;
735 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
736 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
737 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
738 		else
739 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
740 		break;
741 	default:
742 		break;
743 	}
744 }
745 
746 /**
747  * ice_print_link_msg - print link up or down message
748  * @vsi: the VSI whose link status is being queried
749  * @isup: boolean for if the link is now up or down
750  */
751 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
752 {
753 	struct ice_aqc_get_phy_caps_data *caps;
754 	const char *an_advertised;
755 	const char *fec_req;
756 	const char *speed;
757 	const char *fec;
758 	const char *fc;
759 	const char *an;
760 	int status;
761 
762 	if (!vsi)
763 		return;
764 
765 	if (vsi->current_isup == isup)
766 		return;
767 
768 	vsi->current_isup = isup;
769 
770 	if (!isup) {
771 		netdev_info(vsi->netdev, "NIC Link is Down\n");
772 		return;
773 	}
774 
775 	switch (vsi->port_info->phy.link_info.link_speed) {
776 	case ICE_AQ_LINK_SPEED_100GB:
777 		speed = "100 G";
778 		break;
779 	case ICE_AQ_LINK_SPEED_50GB:
780 		speed = "50 G";
781 		break;
782 	case ICE_AQ_LINK_SPEED_40GB:
783 		speed = "40 G";
784 		break;
785 	case ICE_AQ_LINK_SPEED_25GB:
786 		speed = "25 G";
787 		break;
788 	case ICE_AQ_LINK_SPEED_20GB:
789 		speed = "20 G";
790 		break;
791 	case ICE_AQ_LINK_SPEED_10GB:
792 		speed = "10 G";
793 		break;
794 	case ICE_AQ_LINK_SPEED_5GB:
795 		speed = "5 G";
796 		break;
797 	case ICE_AQ_LINK_SPEED_2500MB:
798 		speed = "2.5 G";
799 		break;
800 	case ICE_AQ_LINK_SPEED_1000MB:
801 		speed = "1 G";
802 		break;
803 	case ICE_AQ_LINK_SPEED_100MB:
804 		speed = "100 M";
805 		break;
806 	default:
807 		speed = "Unknown ";
808 		break;
809 	}
810 
811 	switch (vsi->port_info->fc.current_mode) {
812 	case ICE_FC_FULL:
813 		fc = "Rx/Tx";
814 		break;
815 	case ICE_FC_TX_PAUSE:
816 		fc = "Tx";
817 		break;
818 	case ICE_FC_RX_PAUSE:
819 		fc = "Rx";
820 		break;
821 	case ICE_FC_NONE:
822 		fc = "None";
823 		break;
824 	default:
825 		fc = "Unknown";
826 		break;
827 	}
828 
829 	/* Get FEC mode based on negotiated link info */
830 	switch (vsi->port_info->phy.link_info.fec_info) {
831 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
832 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
833 		fec = "RS-FEC";
834 		break;
835 	case ICE_AQ_LINK_25G_KR_FEC_EN:
836 		fec = "FC-FEC/BASE-R";
837 		break;
838 	default:
839 		fec = "NONE";
840 		break;
841 	}
842 
843 	/* check if autoneg completed, might be false due to not supported */
844 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
845 		an = "True";
846 	else
847 		an = "False";
848 
849 	/* Get FEC mode requested based on PHY caps last SW configuration */
850 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
851 	if (!caps) {
852 		fec_req = "Unknown";
853 		an_advertised = "Unknown";
854 		goto done;
855 	}
856 
857 	status = ice_aq_get_phy_caps(vsi->port_info, false,
858 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
859 	if (status)
860 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
861 
862 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
863 
864 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
865 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
866 		fec_req = "RS-FEC";
867 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
868 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
869 		fec_req = "FC-FEC/BASE-R";
870 	else
871 		fec_req = "NONE";
872 
873 	kfree(caps);
874 
875 done:
876 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
877 		    speed, fec_req, fec, an_advertised, an, fc);
878 	ice_print_topo_conflict(vsi);
879 }
880 
881 /**
882  * ice_vsi_link_event - update the VSI's netdev
883  * @vsi: the VSI on which the link event occurred
884  * @link_up: whether or not the VSI needs to be set up or down
885  */
886 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
887 {
888 	if (!vsi)
889 		return;
890 
891 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
892 		return;
893 
894 	if (vsi->type == ICE_VSI_PF) {
895 		if (link_up == netif_carrier_ok(vsi->netdev))
896 			return;
897 
898 		if (link_up) {
899 			netif_carrier_on(vsi->netdev);
900 			netif_tx_wake_all_queues(vsi->netdev);
901 		} else {
902 			netif_carrier_off(vsi->netdev);
903 			netif_tx_stop_all_queues(vsi->netdev);
904 		}
905 	}
906 }
907 
908 /**
909  * ice_set_dflt_mib - send a default config MIB to the FW
910  * @pf: private PF struct
911  *
912  * This function sends a default configuration MIB to the FW.
913  *
914  * If this function errors out at any point, the driver is still able to
915  * function.  The main impact is that LFC may not operate as expected.
916  * Therefore an error state in this function should be treated with a DBG
917  * message and continue on with driver rebuild/reenable.
918  */
919 static void ice_set_dflt_mib(struct ice_pf *pf)
920 {
921 	struct device *dev = ice_pf_to_dev(pf);
922 	u8 mib_type, *buf, *lldpmib = NULL;
923 	u16 len, typelen, offset = 0;
924 	struct ice_lldp_org_tlv *tlv;
925 	struct ice_hw *hw = &pf->hw;
926 	u32 ouisubtype;
927 
928 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
929 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
930 	if (!lldpmib) {
931 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
932 			__func__);
933 		return;
934 	}
935 
936 	/* Add ETS CFG TLV */
937 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
938 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
939 		   ICE_IEEE_ETS_TLV_LEN);
940 	tlv->typelen = htons(typelen);
941 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
942 		      ICE_IEEE_SUBTYPE_ETS_CFG);
943 	tlv->ouisubtype = htonl(ouisubtype);
944 
945 	buf = tlv->tlvinfo;
946 	buf[0] = 0;
947 
948 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
949 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
950 	 * Octets 13 - 20 are TSA values - leave as zeros
951 	 */
952 	buf[5] = 0x64;
953 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
954 	offset += len + 2;
955 	tlv = (struct ice_lldp_org_tlv *)
956 		((char *)tlv + sizeof(tlv->typelen) + len);
957 
958 	/* Add ETS REC TLV */
959 	buf = tlv->tlvinfo;
960 	tlv->typelen = htons(typelen);
961 
962 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
963 		      ICE_IEEE_SUBTYPE_ETS_REC);
964 	tlv->ouisubtype = htonl(ouisubtype);
965 
966 	/* First octet of buf is reserved
967 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
968 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
969 	 * Octets 13 - 20 are TSA value - leave as zeros
970 	 */
971 	buf[5] = 0x64;
972 	offset += len + 2;
973 	tlv = (struct ice_lldp_org_tlv *)
974 		((char *)tlv + sizeof(tlv->typelen) + len);
975 
976 	/* Add PFC CFG TLV */
977 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
978 		   ICE_IEEE_PFC_TLV_LEN);
979 	tlv->typelen = htons(typelen);
980 
981 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
982 		      ICE_IEEE_SUBTYPE_PFC_CFG);
983 	tlv->ouisubtype = htonl(ouisubtype);
984 
985 	/* Octet 1 left as all zeros - PFC disabled */
986 	buf[0] = 0x08;
987 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
988 	offset += len + 2;
989 
990 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
991 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
992 
993 	kfree(lldpmib);
994 }
995 
996 /**
997  * ice_check_phy_fw_load - check if PHY FW load failed
998  * @pf: pointer to PF struct
999  * @link_cfg_err: bitmap from the link info structure
1000  *
1001  * check if external PHY FW load failed and print an error message if it did
1002  */
1003 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1004 {
1005 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1006 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1007 		return;
1008 	}
1009 
1010 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1011 		return;
1012 
1013 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1014 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1015 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1016 	}
1017 }
1018 
1019 /**
1020  * ice_check_module_power
1021  * @pf: pointer to PF struct
1022  * @link_cfg_err: bitmap from the link info structure
1023  *
1024  * check module power level returned by a previous call to aq_get_link_info
1025  * and print error messages if module power level is not supported
1026  */
1027 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1028 {
1029 	/* if module power level is supported, clear the flag */
1030 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1031 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1032 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1033 		return;
1034 	}
1035 
1036 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1037 	 * above block didn't clear this bit, there's nothing to do
1038 	 */
1039 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1040 		return;
1041 
1042 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1043 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1044 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1045 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1046 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1047 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1048 	}
1049 }
1050 
1051 /**
1052  * ice_check_link_cfg_err - check if link configuration failed
1053  * @pf: pointer to the PF struct
1054  * @link_cfg_err: bitmap from the link info structure
1055  *
1056  * print if any link configuration failure happens due to the value in the
1057  * link_cfg_err parameter in the link info structure
1058  */
1059 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1060 {
1061 	ice_check_module_power(pf, link_cfg_err);
1062 	ice_check_phy_fw_load(pf, link_cfg_err);
1063 }
1064 
1065 /**
1066  * ice_link_event - process the link event
1067  * @pf: PF that the link event is associated with
1068  * @pi: port_info for the port that the link event is associated with
1069  * @link_up: true if the physical link is up and false if it is down
1070  * @link_speed: current link speed received from the link event
1071  *
1072  * Returns 0 on success and negative on failure
1073  */
1074 static int
1075 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1076 	       u16 link_speed)
1077 {
1078 	struct device *dev = ice_pf_to_dev(pf);
1079 	struct ice_phy_info *phy_info;
1080 	struct ice_vsi *vsi;
1081 	u16 old_link_speed;
1082 	bool old_link;
1083 	int status;
1084 
1085 	phy_info = &pi->phy;
1086 	phy_info->link_info_old = phy_info->link_info;
1087 
1088 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1089 	old_link_speed = phy_info->link_info_old.link_speed;
1090 
1091 	/* update the link info structures and re-enable link events,
1092 	 * don't bail on failure due to other book keeping needed
1093 	 */
1094 	status = ice_update_link_info(pi);
1095 	if (status)
1096 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1097 			pi->lport, status,
1098 			ice_aq_str(pi->hw->adminq.sq_last_status));
1099 
1100 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1101 
1102 	/* Check if the link state is up after updating link info, and treat
1103 	 * this event as an UP event since the link is actually UP now.
1104 	 */
1105 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1106 		link_up = true;
1107 
1108 	vsi = ice_get_main_vsi(pf);
1109 	if (!vsi || !vsi->port_info)
1110 		return -EINVAL;
1111 
1112 	/* turn off PHY if media was removed */
1113 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1114 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1115 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1116 		ice_set_link(vsi, false);
1117 	}
1118 
1119 	/* if the old link up/down and speed is the same as the new */
1120 	if (link_up == old_link && link_speed == old_link_speed)
1121 		return 0;
1122 
1123 	if (!ice_is_e810(&pf->hw))
1124 		ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1125 
1126 	if (ice_is_dcb_active(pf)) {
1127 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1128 			ice_dcb_rebuild(pf);
1129 	} else {
1130 		if (link_up)
1131 			ice_set_dflt_mib(pf);
1132 	}
1133 	ice_vsi_link_event(vsi, link_up);
1134 	ice_print_link_msg(vsi, link_up);
1135 
1136 	ice_vc_notify_link_state(pf);
1137 
1138 	return 0;
1139 }
1140 
1141 /**
1142  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1143  * @pf: board private structure
1144  */
1145 static void ice_watchdog_subtask(struct ice_pf *pf)
1146 {
1147 	int i;
1148 
1149 	/* if interface is down do nothing */
1150 	if (test_bit(ICE_DOWN, pf->state) ||
1151 	    test_bit(ICE_CFG_BUSY, pf->state))
1152 		return;
1153 
1154 	/* make sure we don't do these things too often */
1155 	if (time_before(jiffies,
1156 			pf->serv_tmr_prev + pf->serv_tmr_period))
1157 		return;
1158 
1159 	pf->serv_tmr_prev = jiffies;
1160 
1161 	/* Update the stats for active netdevs so the network stack
1162 	 * can look at updated numbers whenever it cares to
1163 	 */
1164 	ice_update_pf_stats(pf);
1165 	ice_for_each_vsi(pf, i)
1166 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1167 			ice_update_vsi_stats(pf->vsi[i]);
1168 }
1169 
1170 /**
1171  * ice_init_link_events - enable/initialize link events
1172  * @pi: pointer to the port_info instance
1173  *
1174  * Returns -EIO on failure, 0 on success
1175  */
1176 static int ice_init_link_events(struct ice_port_info *pi)
1177 {
1178 	u16 mask;
1179 
1180 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1181 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1182 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1183 
1184 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1185 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1186 			pi->lport);
1187 		return -EIO;
1188 	}
1189 
1190 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1191 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1192 			pi->lport);
1193 		return -EIO;
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 /**
1200  * ice_handle_link_event - handle link event via ARQ
1201  * @pf: PF that the link event is associated with
1202  * @event: event structure containing link status info
1203  */
1204 static int
1205 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1206 {
1207 	struct ice_aqc_get_link_status_data *link_data;
1208 	struct ice_port_info *port_info;
1209 	int status;
1210 
1211 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1212 	port_info = pf->hw.port_info;
1213 	if (!port_info)
1214 		return -EINVAL;
1215 
1216 	status = ice_link_event(pf, port_info,
1217 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1218 				le16_to_cpu(link_data->link_speed));
1219 	if (status)
1220 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1221 			status);
1222 
1223 	return status;
1224 }
1225 
1226 enum ice_aq_task_state {
1227 	ICE_AQ_TASK_WAITING = 0,
1228 	ICE_AQ_TASK_COMPLETE,
1229 	ICE_AQ_TASK_CANCELED,
1230 };
1231 
1232 struct ice_aq_task {
1233 	struct hlist_node entry;
1234 
1235 	u16 opcode;
1236 	struct ice_rq_event_info *event;
1237 	enum ice_aq_task_state state;
1238 };
1239 
1240 /**
1241  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1242  * @pf: pointer to the PF private structure
1243  * @opcode: the opcode to wait for
1244  * @timeout: how long to wait, in jiffies
1245  * @event: storage for the event info
1246  *
1247  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1248  * current thread will be put to sleep until the specified event occurs or
1249  * until the given timeout is reached.
1250  *
1251  * To obtain only the descriptor contents, pass an event without an allocated
1252  * msg_buf. If the complete data buffer is desired, allocate the
1253  * event->msg_buf with enough space ahead of time.
1254  *
1255  * Returns: zero on success, or a negative error code on failure.
1256  */
1257 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1258 			  struct ice_rq_event_info *event)
1259 {
1260 	struct device *dev = ice_pf_to_dev(pf);
1261 	struct ice_aq_task *task;
1262 	unsigned long start;
1263 	long ret;
1264 	int err;
1265 
1266 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1267 	if (!task)
1268 		return -ENOMEM;
1269 
1270 	INIT_HLIST_NODE(&task->entry);
1271 	task->opcode = opcode;
1272 	task->event = event;
1273 	task->state = ICE_AQ_TASK_WAITING;
1274 
1275 	spin_lock_bh(&pf->aq_wait_lock);
1276 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1277 	spin_unlock_bh(&pf->aq_wait_lock);
1278 
1279 	start = jiffies;
1280 
1281 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1282 					       timeout);
1283 	switch (task->state) {
1284 	case ICE_AQ_TASK_WAITING:
1285 		err = ret < 0 ? ret : -ETIMEDOUT;
1286 		break;
1287 	case ICE_AQ_TASK_CANCELED:
1288 		err = ret < 0 ? ret : -ECANCELED;
1289 		break;
1290 	case ICE_AQ_TASK_COMPLETE:
1291 		err = ret < 0 ? ret : 0;
1292 		break;
1293 	default:
1294 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1295 		err = -EINVAL;
1296 		break;
1297 	}
1298 
1299 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1300 		jiffies_to_msecs(jiffies - start),
1301 		jiffies_to_msecs(timeout),
1302 		opcode);
1303 
1304 	spin_lock_bh(&pf->aq_wait_lock);
1305 	hlist_del(&task->entry);
1306 	spin_unlock_bh(&pf->aq_wait_lock);
1307 	kfree(task);
1308 
1309 	return err;
1310 }
1311 
1312 /**
1313  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1314  * @pf: pointer to the PF private structure
1315  * @opcode: the opcode of the event
1316  * @event: the event to check
1317  *
1318  * Loops over the current list of pending threads waiting for an AdminQ event.
1319  * For each matching task, copy the contents of the event into the task
1320  * structure and wake up the thread.
1321  *
1322  * If multiple threads wait for the same opcode, they will all be woken up.
1323  *
1324  * Note that event->msg_buf will only be duplicated if the event has a buffer
1325  * with enough space already allocated. Otherwise, only the descriptor and
1326  * message length will be copied.
1327  *
1328  * Returns: true if an event was found, false otherwise
1329  */
1330 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1331 				struct ice_rq_event_info *event)
1332 {
1333 	struct ice_aq_task *task;
1334 	bool found = false;
1335 
1336 	spin_lock_bh(&pf->aq_wait_lock);
1337 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1338 		if (task->state || task->opcode != opcode)
1339 			continue;
1340 
1341 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1342 		task->event->msg_len = event->msg_len;
1343 
1344 		/* Only copy the data buffer if a destination was set */
1345 		if (task->event->msg_buf &&
1346 		    task->event->buf_len > event->buf_len) {
1347 			memcpy(task->event->msg_buf, event->msg_buf,
1348 			       event->buf_len);
1349 			task->event->buf_len = event->buf_len;
1350 		}
1351 
1352 		task->state = ICE_AQ_TASK_COMPLETE;
1353 		found = true;
1354 	}
1355 	spin_unlock_bh(&pf->aq_wait_lock);
1356 
1357 	if (found)
1358 		wake_up(&pf->aq_wait_queue);
1359 }
1360 
1361 /**
1362  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1363  * @pf: the PF private structure
1364  *
1365  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1366  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1367  */
1368 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1369 {
1370 	struct ice_aq_task *task;
1371 
1372 	spin_lock_bh(&pf->aq_wait_lock);
1373 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1374 		task->state = ICE_AQ_TASK_CANCELED;
1375 	spin_unlock_bh(&pf->aq_wait_lock);
1376 
1377 	wake_up(&pf->aq_wait_queue);
1378 }
1379 
1380 /**
1381  * __ice_clean_ctrlq - helper function to clean controlq rings
1382  * @pf: ptr to struct ice_pf
1383  * @q_type: specific Control queue type
1384  */
1385 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1386 {
1387 	struct device *dev = ice_pf_to_dev(pf);
1388 	struct ice_rq_event_info event;
1389 	struct ice_hw *hw = &pf->hw;
1390 	struct ice_ctl_q_info *cq;
1391 	u16 pending, i = 0;
1392 	const char *qtype;
1393 	u32 oldval, val;
1394 
1395 	/* Do not clean control queue if/when PF reset fails */
1396 	if (test_bit(ICE_RESET_FAILED, pf->state))
1397 		return 0;
1398 
1399 	switch (q_type) {
1400 	case ICE_CTL_Q_ADMIN:
1401 		cq = &hw->adminq;
1402 		qtype = "Admin";
1403 		break;
1404 	case ICE_CTL_Q_SB:
1405 		cq = &hw->sbq;
1406 		qtype = "Sideband";
1407 		break;
1408 	case ICE_CTL_Q_MAILBOX:
1409 		cq = &hw->mailboxq;
1410 		qtype = "Mailbox";
1411 		/* we are going to try to detect a malicious VF, so set the
1412 		 * state to begin detection
1413 		 */
1414 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1415 		break;
1416 	default:
1417 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1418 		return 0;
1419 	}
1420 
1421 	/* check for error indications - PF_xx_AxQLEN register layout for
1422 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1423 	 */
1424 	val = rd32(hw, cq->rq.len);
1425 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1426 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1427 		oldval = val;
1428 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1429 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1430 				qtype);
1431 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1432 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1433 				qtype);
1434 		}
1435 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1436 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1437 				qtype);
1438 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1439 			 PF_FW_ARQLEN_ARQCRIT_M);
1440 		if (oldval != val)
1441 			wr32(hw, cq->rq.len, val);
1442 	}
1443 
1444 	val = rd32(hw, cq->sq.len);
1445 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1446 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1447 		oldval = val;
1448 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1449 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1450 				qtype);
1451 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1452 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1453 				qtype);
1454 		}
1455 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1456 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1457 				qtype);
1458 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1459 			 PF_FW_ATQLEN_ATQCRIT_M);
1460 		if (oldval != val)
1461 			wr32(hw, cq->sq.len, val);
1462 	}
1463 
1464 	event.buf_len = cq->rq_buf_size;
1465 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1466 	if (!event.msg_buf)
1467 		return 0;
1468 
1469 	do {
1470 		u16 opcode;
1471 		int ret;
1472 
1473 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1474 		if (ret == -EALREADY)
1475 			break;
1476 		if (ret) {
1477 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1478 				ret);
1479 			break;
1480 		}
1481 
1482 		opcode = le16_to_cpu(event.desc.opcode);
1483 
1484 		/* Notify any thread that might be waiting for this event */
1485 		ice_aq_check_events(pf, opcode, &event);
1486 
1487 		switch (opcode) {
1488 		case ice_aqc_opc_get_link_status:
1489 			if (ice_handle_link_event(pf, &event))
1490 				dev_err(dev, "Could not handle link event\n");
1491 			break;
1492 		case ice_aqc_opc_event_lan_overflow:
1493 			ice_vf_lan_overflow_event(pf, &event);
1494 			break;
1495 		case ice_mbx_opc_send_msg_to_pf:
1496 			if (!ice_is_malicious_vf(pf, &event, i, pending))
1497 				ice_vc_process_vf_msg(pf, &event);
1498 			break;
1499 		case ice_aqc_opc_fw_logging:
1500 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1501 			break;
1502 		case ice_aqc_opc_lldp_set_mib_change:
1503 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1504 			break;
1505 		default:
1506 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1507 				qtype, opcode);
1508 			break;
1509 		}
1510 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1511 
1512 	kfree(event.msg_buf);
1513 
1514 	return pending && (i == ICE_DFLT_IRQ_WORK);
1515 }
1516 
1517 /**
1518  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1519  * @hw: pointer to hardware info
1520  * @cq: control queue information
1521  *
1522  * returns true if there are pending messages in a queue, false if there aren't
1523  */
1524 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1525 {
1526 	u16 ntu;
1527 
1528 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1529 	return cq->rq.next_to_clean != ntu;
1530 }
1531 
1532 /**
1533  * ice_clean_adminq_subtask - clean the AdminQ rings
1534  * @pf: board private structure
1535  */
1536 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1537 {
1538 	struct ice_hw *hw = &pf->hw;
1539 
1540 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1541 		return;
1542 
1543 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1544 		return;
1545 
1546 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1547 
1548 	/* There might be a situation where new messages arrive to a control
1549 	 * queue between processing the last message and clearing the
1550 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1551 	 * ice_ctrlq_pending) and process new messages if any.
1552 	 */
1553 	if (ice_ctrlq_pending(hw, &hw->adminq))
1554 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1555 
1556 	ice_flush(hw);
1557 }
1558 
1559 /**
1560  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1561  * @pf: board private structure
1562  */
1563 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1564 {
1565 	struct ice_hw *hw = &pf->hw;
1566 
1567 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1568 		return;
1569 
1570 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1571 		return;
1572 
1573 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1574 
1575 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1576 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1577 
1578 	ice_flush(hw);
1579 }
1580 
1581 /**
1582  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1583  * @pf: board private structure
1584  */
1585 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1586 {
1587 	struct ice_hw *hw = &pf->hw;
1588 
1589 	/* Nothing to do here if sideband queue is not supported */
1590 	if (!ice_is_sbq_supported(hw)) {
1591 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1592 		return;
1593 	}
1594 
1595 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1596 		return;
1597 
1598 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1599 		return;
1600 
1601 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1602 
1603 	if (ice_ctrlq_pending(hw, &hw->sbq))
1604 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1605 
1606 	ice_flush(hw);
1607 }
1608 
1609 /**
1610  * ice_service_task_schedule - schedule the service task to wake up
1611  * @pf: board private structure
1612  *
1613  * If not already scheduled, this puts the task into the work queue.
1614  */
1615 void ice_service_task_schedule(struct ice_pf *pf)
1616 {
1617 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1618 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1619 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1620 		queue_work(ice_wq, &pf->serv_task);
1621 }
1622 
1623 /**
1624  * ice_service_task_complete - finish up the service task
1625  * @pf: board private structure
1626  */
1627 static void ice_service_task_complete(struct ice_pf *pf)
1628 {
1629 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1630 
1631 	/* force memory (pf->state) to sync before next service task */
1632 	smp_mb__before_atomic();
1633 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1634 }
1635 
1636 /**
1637  * ice_service_task_stop - stop service task and cancel works
1638  * @pf: board private structure
1639  *
1640  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1641  * 1 otherwise.
1642  */
1643 static int ice_service_task_stop(struct ice_pf *pf)
1644 {
1645 	int ret;
1646 
1647 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1648 
1649 	if (pf->serv_tmr.function)
1650 		del_timer_sync(&pf->serv_tmr);
1651 	if (pf->serv_task.func)
1652 		cancel_work_sync(&pf->serv_task);
1653 
1654 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1655 	return ret;
1656 }
1657 
1658 /**
1659  * ice_service_task_restart - restart service task and schedule works
1660  * @pf: board private structure
1661  *
1662  * This function is needed for suspend and resume works (e.g WoL scenario)
1663  */
1664 static void ice_service_task_restart(struct ice_pf *pf)
1665 {
1666 	clear_bit(ICE_SERVICE_DIS, pf->state);
1667 	ice_service_task_schedule(pf);
1668 }
1669 
1670 /**
1671  * ice_service_timer - timer callback to schedule service task
1672  * @t: pointer to timer_list
1673  */
1674 static void ice_service_timer(struct timer_list *t)
1675 {
1676 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1677 
1678 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1679 	ice_service_task_schedule(pf);
1680 }
1681 
1682 /**
1683  * ice_handle_mdd_event - handle malicious driver detect event
1684  * @pf: pointer to the PF structure
1685  *
1686  * Called from service task. OICR interrupt handler indicates MDD event.
1687  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1688  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1689  * disable the queue, the PF can be configured to reset the VF using ethtool
1690  * private flag mdd-auto-reset-vf.
1691  */
1692 static void ice_handle_mdd_event(struct ice_pf *pf)
1693 {
1694 	struct device *dev = ice_pf_to_dev(pf);
1695 	struct ice_hw *hw = &pf->hw;
1696 	struct ice_vf *vf;
1697 	unsigned int bkt;
1698 	u32 reg;
1699 
1700 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1701 		/* Since the VF MDD event logging is rate limited, check if
1702 		 * there are pending MDD events.
1703 		 */
1704 		ice_print_vfs_mdd_events(pf);
1705 		return;
1706 	}
1707 
1708 	/* find what triggered an MDD event */
1709 	reg = rd32(hw, GL_MDET_TX_PQM);
1710 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1711 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1712 				GL_MDET_TX_PQM_PF_NUM_S;
1713 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1714 				GL_MDET_TX_PQM_VF_NUM_S;
1715 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1716 				GL_MDET_TX_PQM_MAL_TYPE_S;
1717 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1718 				GL_MDET_TX_PQM_QNUM_S);
1719 
1720 		if (netif_msg_tx_err(pf))
1721 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1722 				 event, queue, pf_num, vf_num);
1723 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1724 	}
1725 
1726 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1727 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1728 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1729 				GL_MDET_TX_TCLAN_PF_NUM_S;
1730 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1731 				GL_MDET_TX_TCLAN_VF_NUM_S;
1732 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1733 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1734 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1735 				GL_MDET_TX_TCLAN_QNUM_S);
1736 
1737 		if (netif_msg_tx_err(pf))
1738 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1739 				 event, queue, pf_num, vf_num);
1740 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1741 	}
1742 
1743 	reg = rd32(hw, GL_MDET_RX);
1744 	if (reg & GL_MDET_RX_VALID_M) {
1745 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1746 				GL_MDET_RX_PF_NUM_S;
1747 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1748 				GL_MDET_RX_VF_NUM_S;
1749 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1750 				GL_MDET_RX_MAL_TYPE_S;
1751 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1752 				GL_MDET_RX_QNUM_S);
1753 
1754 		if (netif_msg_rx_err(pf))
1755 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1756 				 event, queue, pf_num, vf_num);
1757 		wr32(hw, GL_MDET_RX, 0xffffffff);
1758 	}
1759 
1760 	/* check to see if this PF caused an MDD event */
1761 	reg = rd32(hw, PF_MDET_TX_PQM);
1762 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1763 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1764 		if (netif_msg_tx_err(pf))
1765 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1766 	}
1767 
1768 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1769 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1770 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1771 		if (netif_msg_tx_err(pf))
1772 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1773 	}
1774 
1775 	reg = rd32(hw, PF_MDET_RX);
1776 	if (reg & PF_MDET_RX_VALID_M) {
1777 		wr32(hw, PF_MDET_RX, 0xFFFF);
1778 		if (netif_msg_rx_err(pf))
1779 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1780 	}
1781 
1782 	/* Check to see if one of the VFs caused an MDD event, and then
1783 	 * increment counters and set print pending
1784 	 */
1785 	mutex_lock(&pf->vfs.table_lock);
1786 	ice_for_each_vf(pf, bkt, vf) {
1787 		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1788 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1789 			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1790 			vf->mdd_tx_events.count++;
1791 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1792 			if (netif_msg_tx_err(pf))
1793 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1794 					 vf->vf_id);
1795 		}
1796 
1797 		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1798 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1799 			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1800 			vf->mdd_tx_events.count++;
1801 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1802 			if (netif_msg_tx_err(pf))
1803 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1804 					 vf->vf_id);
1805 		}
1806 
1807 		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1808 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1809 			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1810 			vf->mdd_tx_events.count++;
1811 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1812 			if (netif_msg_tx_err(pf))
1813 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1814 					 vf->vf_id);
1815 		}
1816 
1817 		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1818 		if (reg & VP_MDET_RX_VALID_M) {
1819 			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1820 			vf->mdd_rx_events.count++;
1821 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1822 			if (netif_msg_rx_err(pf))
1823 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1824 					 vf->vf_id);
1825 
1826 			/* Since the queue is disabled on VF Rx MDD events, the
1827 			 * PF can be configured to reset the VF through ethtool
1828 			 * private flag mdd-auto-reset-vf.
1829 			 */
1830 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1831 				/* VF MDD event counters will be cleared by
1832 				 * reset, so print the event prior to reset.
1833 				 */
1834 				ice_print_vf_rx_mdd_event(vf);
1835 				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1836 			}
1837 		}
1838 	}
1839 	mutex_unlock(&pf->vfs.table_lock);
1840 
1841 	ice_print_vfs_mdd_events(pf);
1842 }
1843 
1844 /**
1845  * ice_force_phys_link_state - Force the physical link state
1846  * @vsi: VSI to force the physical link state to up/down
1847  * @link_up: true/false indicates to set the physical link to up/down
1848  *
1849  * Force the physical link state by getting the current PHY capabilities from
1850  * hardware and setting the PHY config based on the determined capabilities. If
1851  * link changes a link event will be triggered because both the Enable Automatic
1852  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1853  *
1854  * Returns 0 on success, negative on failure
1855  */
1856 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1857 {
1858 	struct ice_aqc_get_phy_caps_data *pcaps;
1859 	struct ice_aqc_set_phy_cfg_data *cfg;
1860 	struct ice_port_info *pi;
1861 	struct device *dev;
1862 	int retcode;
1863 
1864 	if (!vsi || !vsi->port_info || !vsi->back)
1865 		return -EINVAL;
1866 	if (vsi->type != ICE_VSI_PF)
1867 		return 0;
1868 
1869 	dev = ice_pf_to_dev(vsi->back);
1870 
1871 	pi = vsi->port_info;
1872 
1873 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1874 	if (!pcaps)
1875 		return -ENOMEM;
1876 
1877 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1878 				      NULL);
1879 	if (retcode) {
1880 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1881 			vsi->vsi_num, retcode);
1882 		retcode = -EIO;
1883 		goto out;
1884 	}
1885 
1886 	/* No change in link */
1887 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1888 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1889 		goto out;
1890 
1891 	/* Use the current user PHY configuration. The current user PHY
1892 	 * configuration is initialized during probe from PHY capabilities
1893 	 * software mode, and updated on set PHY configuration.
1894 	 */
1895 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1896 	if (!cfg) {
1897 		retcode = -ENOMEM;
1898 		goto out;
1899 	}
1900 
1901 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1902 	if (link_up)
1903 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1904 	else
1905 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1906 
1907 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1908 	if (retcode) {
1909 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1910 			vsi->vsi_num, retcode);
1911 		retcode = -EIO;
1912 	}
1913 
1914 	kfree(cfg);
1915 out:
1916 	kfree(pcaps);
1917 	return retcode;
1918 }
1919 
1920 /**
1921  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1922  * @pi: port info structure
1923  *
1924  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1925  */
1926 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1927 {
1928 	struct ice_aqc_get_phy_caps_data *pcaps;
1929 	struct ice_pf *pf = pi->hw->back;
1930 	int err;
1931 
1932 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1933 	if (!pcaps)
1934 		return -ENOMEM;
1935 
1936 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1937 				  pcaps, NULL);
1938 
1939 	if (err) {
1940 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1941 		goto out;
1942 	}
1943 
1944 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1945 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1946 
1947 out:
1948 	kfree(pcaps);
1949 	return err;
1950 }
1951 
1952 /**
1953  * ice_init_link_dflt_override - Initialize link default override
1954  * @pi: port info structure
1955  *
1956  * Initialize link default override and PHY total port shutdown during probe
1957  */
1958 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1959 {
1960 	struct ice_link_default_override_tlv *ldo;
1961 	struct ice_pf *pf = pi->hw->back;
1962 
1963 	ldo = &pf->link_dflt_override;
1964 	if (ice_get_link_default_override(ldo, pi))
1965 		return;
1966 
1967 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1968 		return;
1969 
1970 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1971 	 * ethtool private flag) for ports with Port Disable bit set.
1972 	 */
1973 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1974 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1975 }
1976 
1977 /**
1978  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1979  * @pi: port info structure
1980  *
1981  * If default override is enabled, initialize the user PHY cfg speed and FEC
1982  * settings using the default override mask from the NVM.
1983  *
1984  * The PHY should only be configured with the default override settings the
1985  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1986  * is used to indicate that the user PHY cfg default override is initialized
1987  * and the PHY has not been configured with the default override settings. The
1988  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1989  * configured.
1990  *
1991  * This function should be called only if the FW doesn't support default
1992  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1993  */
1994 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1995 {
1996 	struct ice_link_default_override_tlv *ldo;
1997 	struct ice_aqc_set_phy_cfg_data *cfg;
1998 	struct ice_phy_info *phy = &pi->phy;
1999 	struct ice_pf *pf = pi->hw->back;
2000 
2001 	ldo = &pf->link_dflt_override;
2002 
2003 	/* If link default override is enabled, use to mask NVM PHY capabilities
2004 	 * for speed and FEC default configuration.
2005 	 */
2006 	cfg = &phy->curr_user_phy_cfg;
2007 
2008 	if (ldo->phy_type_low || ldo->phy_type_high) {
2009 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2010 				    cpu_to_le64(ldo->phy_type_low);
2011 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2012 				     cpu_to_le64(ldo->phy_type_high);
2013 	}
2014 	cfg->link_fec_opt = ldo->fec_options;
2015 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2016 
2017 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2018 }
2019 
2020 /**
2021  * ice_init_phy_user_cfg - Initialize the PHY user configuration
2022  * @pi: port info structure
2023  *
2024  * Initialize the current user PHY configuration, speed, FEC, and FC requested
2025  * mode to default. The PHY defaults are from get PHY capabilities topology
2026  * with media so call when media is first available. An error is returned if
2027  * called when media is not available. The PHY initialization completed state is
2028  * set here.
2029  *
2030  * These configurations are used when setting PHY
2031  * configuration. The user PHY configuration is updated on set PHY
2032  * configuration. Returns 0 on success, negative on failure
2033  */
2034 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2035 {
2036 	struct ice_aqc_get_phy_caps_data *pcaps;
2037 	struct ice_phy_info *phy = &pi->phy;
2038 	struct ice_pf *pf = pi->hw->back;
2039 	int err;
2040 
2041 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2042 		return -EIO;
2043 
2044 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2045 	if (!pcaps)
2046 		return -ENOMEM;
2047 
2048 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2049 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2050 					  pcaps, NULL);
2051 	else
2052 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2053 					  pcaps, NULL);
2054 	if (err) {
2055 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2056 		goto err_out;
2057 	}
2058 
2059 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2060 
2061 	/* check if lenient mode is supported and enabled */
2062 	if (ice_fw_supports_link_override(pi->hw) &&
2063 	    !(pcaps->module_compliance_enforcement &
2064 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2065 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2066 
2067 		/* if the FW supports default PHY configuration mode, then the driver
2068 		 * does not have to apply link override settings. If not,
2069 		 * initialize user PHY configuration with link override values
2070 		 */
2071 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2072 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2073 			ice_init_phy_cfg_dflt_override(pi);
2074 			goto out;
2075 		}
2076 	}
2077 
2078 	/* if link default override is not enabled, set user flow control and
2079 	 * FEC settings based on what get_phy_caps returned
2080 	 */
2081 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2082 						      pcaps->link_fec_options);
2083 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2084 
2085 out:
2086 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2087 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2088 err_out:
2089 	kfree(pcaps);
2090 	return err;
2091 }
2092 
2093 /**
2094  * ice_configure_phy - configure PHY
2095  * @vsi: VSI of PHY
2096  *
2097  * Set the PHY configuration. If the current PHY configuration is the same as
2098  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2099  * configure the based get PHY capabilities for topology with media.
2100  */
2101 static int ice_configure_phy(struct ice_vsi *vsi)
2102 {
2103 	struct device *dev = ice_pf_to_dev(vsi->back);
2104 	struct ice_port_info *pi = vsi->port_info;
2105 	struct ice_aqc_get_phy_caps_data *pcaps;
2106 	struct ice_aqc_set_phy_cfg_data *cfg;
2107 	struct ice_phy_info *phy = &pi->phy;
2108 	struct ice_pf *pf = vsi->back;
2109 	int err;
2110 
2111 	/* Ensure we have media as we cannot configure a medialess port */
2112 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2113 		return -EPERM;
2114 
2115 	ice_print_topo_conflict(vsi);
2116 
2117 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2118 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2119 		return -EPERM;
2120 
2121 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2122 		return ice_force_phys_link_state(vsi, true);
2123 
2124 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2125 	if (!pcaps)
2126 		return -ENOMEM;
2127 
2128 	/* Get current PHY config */
2129 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2130 				  NULL);
2131 	if (err) {
2132 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2133 			vsi->vsi_num, err);
2134 		goto done;
2135 	}
2136 
2137 	/* If PHY enable link is configured and configuration has not changed,
2138 	 * there's nothing to do
2139 	 */
2140 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2141 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2142 		goto done;
2143 
2144 	/* Use PHY topology as baseline for configuration */
2145 	memset(pcaps, 0, sizeof(*pcaps));
2146 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2147 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2148 					  pcaps, NULL);
2149 	else
2150 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2151 					  pcaps, NULL);
2152 	if (err) {
2153 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2154 			vsi->vsi_num, err);
2155 		goto done;
2156 	}
2157 
2158 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2159 	if (!cfg) {
2160 		err = -ENOMEM;
2161 		goto done;
2162 	}
2163 
2164 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2165 
2166 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2167 	 * ice_init_phy_user_cfg_ldo.
2168 	 */
2169 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2170 			       vsi->back->state)) {
2171 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2172 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2173 	} else {
2174 		u64 phy_low = 0, phy_high = 0;
2175 
2176 		ice_update_phy_type(&phy_low, &phy_high,
2177 				    pi->phy.curr_user_speed_req);
2178 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2179 		cfg->phy_type_high = pcaps->phy_type_high &
2180 				     cpu_to_le64(phy_high);
2181 	}
2182 
2183 	/* Can't provide what was requested; use PHY capabilities */
2184 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2185 		cfg->phy_type_low = pcaps->phy_type_low;
2186 		cfg->phy_type_high = pcaps->phy_type_high;
2187 	}
2188 
2189 	/* FEC */
2190 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2191 
2192 	/* Can't provide what was requested; use PHY capabilities */
2193 	if (cfg->link_fec_opt !=
2194 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2195 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2196 		cfg->link_fec_opt = pcaps->link_fec_options;
2197 	}
2198 
2199 	/* Flow Control - always supported; no need to check against
2200 	 * capabilities
2201 	 */
2202 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2203 
2204 	/* Enable link and link update */
2205 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2206 
2207 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2208 	if (err)
2209 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2210 			vsi->vsi_num, err);
2211 
2212 	kfree(cfg);
2213 done:
2214 	kfree(pcaps);
2215 	return err;
2216 }
2217 
2218 /**
2219  * ice_check_media_subtask - Check for media
2220  * @pf: pointer to PF struct
2221  *
2222  * If media is available, then initialize PHY user configuration if it is not
2223  * been, and configure the PHY if the interface is up.
2224  */
2225 static void ice_check_media_subtask(struct ice_pf *pf)
2226 {
2227 	struct ice_port_info *pi;
2228 	struct ice_vsi *vsi;
2229 	int err;
2230 
2231 	/* No need to check for media if it's already present */
2232 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2233 		return;
2234 
2235 	vsi = ice_get_main_vsi(pf);
2236 	if (!vsi)
2237 		return;
2238 
2239 	/* Refresh link info and check if media is present */
2240 	pi = vsi->port_info;
2241 	err = ice_update_link_info(pi);
2242 	if (err)
2243 		return;
2244 
2245 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2246 
2247 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2248 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2249 			ice_init_phy_user_cfg(pi);
2250 
2251 		/* PHY settings are reset on media insertion, reconfigure
2252 		 * PHY to preserve settings.
2253 		 */
2254 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2255 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2256 			return;
2257 
2258 		err = ice_configure_phy(vsi);
2259 		if (!err)
2260 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2261 
2262 		/* A Link Status Event will be generated; the event handler
2263 		 * will complete bringing the interface up
2264 		 */
2265 	}
2266 }
2267 
2268 /**
2269  * ice_service_task - manage and run subtasks
2270  * @work: pointer to work_struct contained by the PF struct
2271  */
2272 static void ice_service_task(struct work_struct *work)
2273 {
2274 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2275 	unsigned long start_time = jiffies;
2276 
2277 	/* subtasks */
2278 
2279 	/* process reset requests first */
2280 	ice_reset_subtask(pf);
2281 
2282 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2283 	if (ice_is_reset_in_progress(pf->state) ||
2284 	    test_bit(ICE_SUSPENDED, pf->state) ||
2285 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2286 		ice_service_task_complete(pf);
2287 		return;
2288 	}
2289 
2290 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2291 		struct iidc_event *event;
2292 
2293 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2294 		if (event) {
2295 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2296 			/* report the entire OICR value to AUX driver */
2297 			swap(event->reg, pf->oicr_err_reg);
2298 			ice_send_event_to_aux(pf, event);
2299 			kfree(event);
2300 		}
2301 	}
2302 
2303 	if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
2304 		/* Plug aux device per request */
2305 		ice_plug_aux_dev(pf);
2306 
2307 		/* Mark plugging as done but check whether unplug was
2308 		 * requested during ice_plug_aux_dev() call
2309 		 * (e.g. from ice_clear_rdma_cap()) and if so then
2310 		 * plug aux device.
2311 		 */
2312 		if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2313 			ice_unplug_aux_dev(pf);
2314 	}
2315 
2316 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2317 		struct iidc_event *event;
2318 
2319 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2320 		if (event) {
2321 			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2322 			ice_send_event_to_aux(pf, event);
2323 			kfree(event);
2324 		}
2325 	}
2326 
2327 	ice_clean_adminq_subtask(pf);
2328 	ice_check_media_subtask(pf);
2329 	ice_check_for_hang_subtask(pf);
2330 	ice_sync_fltr_subtask(pf);
2331 	ice_handle_mdd_event(pf);
2332 	ice_watchdog_subtask(pf);
2333 
2334 	if (ice_is_safe_mode(pf)) {
2335 		ice_service_task_complete(pf);
2336 		return;
2337 	}
2338 
2339 	ice_process_vflr_event(pf);
2340 	ice_clean_mailboxq_subtask(pf);
2341 	ice_clean_sbq_subtask(pf);
2342 	ice_sync_arfs_fltrs(pf);
2343 	ice_flush_fdir_ctx(pf);
2344 
2345 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2346 	ice_service_task_complete(pf);
2347 
2348 	/* If the tasks have taken longer than one service timer period
2349 	 * or there is more work to be done, reset the service timer to
2350 	 * schedule the service task now.
2351 	 */
2352 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2353 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2354 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2355 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2356 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2357 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2358 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2359 		mod_timer(&pf->serv_tmr, jiffies);
2360 }
2361 
2362 /**
2363  * ice_set_ctrlq_len - helper function to set controlq length
2364  * @hw: pointer to the HW instance
2365  */
2366 static void ice_set_ctrlq_len(struct ice_hw *hw)
2367 {
2368 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2369 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2370 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2371 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2372 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2373 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2374 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2375 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2376 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2377 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2378 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2379 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2380 }
2381 
2382 /**
2383  * ice_schedule_reset - schedule a reset
2384  * @pf: board private structure
2385  * @reset: reset being requested
2386  */
2387 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2388 {
2389 	struct device *dev = ice_pf_to_dev(pf);
2390 
2391 	/* bail out if earlier reset has failed */
2392 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2393 		dev_dbg(dev, "earlier reset has failed\n");
2394 		return -EIO;
2395 	}
2396 	/* bail if reset/recovery already in progress */
2397 	if (ice_is_reset_in_progress(pf->state)) {
2398 		dev_dbg(dev, "Reset already in progress\n");
2399 		return -EBUSY;
2400 	}
2401 
2402 	switch (reset) {
2403 	case ICE_RESET_PFR:
2404 		set_bit(ICE_PFR_REQ, pf->state);
2405 		break;
2406 	case ICE_RESET_CORER:
2407 		set_bit(ICE_CORER_REQ, pf->state);
2408 		break;
2409 	case ICE_RESET_GLOBR:
2410 		set_bit(ICE_GLOBR_REQ, pf->state);
2411 		break;
2412 	default:
2413 		return -EINVAL;
2414 	}
2415 
2416 	ice_service_task_schedule(pf);
2417 	return 0;
2418 }
2419 
2420 /**
2421  * ice_irq_affinity_notify - Callback for affinity changes
2422  * @notify: context as to what irq was changed
2423  * @mask: the new affinity mask
2424  *
2425  * This is a callback function used by the irq_set_affinity_notifier function
2426  * so that we may register to receive changes to the irq affinity masks.
2427  */
2428 static void
2429 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2430 			const cpumask_t *mask)
2431 {
2432 	struct ice_q_vector *q_vector =
2433 		container_of(notify, struct ice_q_vector, affinity_notify);
2434 
2435 	cpumask_copy(&q_vector->affinity_mask, mask);
2436 }
2437 
2438 /**
2439  * ice_irq_affinity_release - Callback for affinity notifier release
2440  * @ref: internal core kernel usage
2441  *
2442  * This is a callback function used by the irq_set_affinity_notifier function
2443  * to inform the current notification subscriber that they will no longer
2444  * receive notifications.
2445  */
2446 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2447 
2448 /**
2449  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2450  * @vsi: the VSI being configured
2451  */
2452 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2453 {
2454 	struct ice_hw *hw = &vsi->back->hw;
2455 	int i;
2456 
2457 	ice_for_each_q_vector(vsi, i)
2458 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2459 
2460 	ice_flush(hw);
2461 	return 0;
2462 }
2463 
2464 /**
2465  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2466  * @vsi: the VSI being configured
2467  * @basename: name for the vector
2468  */
2469 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2470 {
2471 	int q_vectors = vsi->num_q_vectors;
2472 	struct ice_pf *pf = vsi->back;
2473 	int base = vsi->base_vector;
2474 	struct device *dev;
2475 	int rx_int_idx = 0;
2476 	int tx_int_idx = 0;
2477 	int vector, err;
2478 	int irq_num;
2479 
2480 	dev = ice_pf_to_dev(pf);
2481 	for (vector = 0; vector < q_vectors; vector++) {
2482 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2483 
2484 		irq_num = pf->msix_entries[base + vector].vector;
2485 
2486 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2487 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2488 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2489 			tx_int_idx++;
2490 		} else if (q_vector->rx.rx_ring) {
2491 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2492 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2493 		} else if (q_vector->tx.tx_ring) {
2494 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2495 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2496 		} else {
2497 			/* skip this unused q_vector */
2498 			continue;
2499 		}
2500 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2501 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2502 					       IRQF_SHARED, q_vector->name,
2503 					       q_vector);
2504 		else
2505 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2506 					       0, q_vector->name, q_vector);
2507 		if (err) {
2508 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2509 				   err);
2510 			goto free_q_irqs;
2511 		}
2512 
2513 		/* register for affinity change notifications */
2514 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2515 			struct irq_affinity_notify *affinity_notify;
2516 
2517 			affinity_notify = &q_vector->affinity_notify;
2518 			affinity_notify->notify = ice_irq_affinity_notify;
2519 			affinity_notify->release = ice_irq_affinity_release;
2520 			irq_set_affinity_notifier(irq_num, affinity_notify);
2521 		}
2522 
2523 		/* assign the mask for this irq */
2524 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2525 	}
2526 
2527 	err = ice_set_cpu_rx_rmap(vsi);
2528 	if (err) {
2529 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2530 			   vsi->vsi_num, ERR_PTR(err));
2531 		goto free_q_irqs;
2532 	}
2533 
2534 	vsi->irqs_ready = true;
2535 	return 0;
2536 
2537 free_q_irqs:
2538 	while (vector) {
2539 		vector--;
2540 		irq_num = pf->msix_entries[base + vector].vector;
2541 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2542 			irq_set_affinity_notifier(irq_num, NULL);
2543 		irq_set_affinity_hint(irq_num, NULL);
2544 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2545 	}
2546 	return err;
2547 }
2548 
2549 /**
2550  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2551  * @vsi: VSI to setup Tx rings used by XDP
2552  *
2553  * Return 0 on success and negative value on error
2554  */
2555 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2556 {
2557 	struct device *dev = ice_pf_to_dev(vsi->back);
2558 	struct ice_tx_desc *tx_desc;
2559 	int i, j;
2560 
2561 	ice_for_each_xdp_txq(vsi, i) {
2562 		u16 xdp_q_idx = vsi->alloc_txq + i;
2563 		struct ice_tx_ring *xdp_ring;
2564 
2565 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2566 
2567 		if (!xdp_ring)
2568 			goto free_xdp_rings;
2569 
2570 		xdp_ring->q_index = xdp_q_idx;
2571 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2572 		xdp_ring->vsi = vsi;
2573 		xdp_ring->netdev = NULL;
2574 		xdp_ring->dev = dev;
2575 		xdp_ring->count = vsi->num_tx_desc;
2576 		xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
2577 		xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
2578 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2579 		if (ice_setup_tx_ring(xdp_ring))
2580 			goto free_xdp_rings;
2581 		ice_set_ring_xdp(xdp_ring);
2582 		spin_lock_init(&xdp_ring->tx_lock);
2583 		for (j = 0; j < xdp_ring->count; j++) {
2584 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2585 			tx_desc->cmd_type_offset_bsz = 0;
2586 		}
2587 	}
2588 
2589 	return 0;
2590 
2591 free_xdp_rings:
2592 	for (; i >= 0; i--)
2593 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2594 			ice_free_tx_ring(vsi->xdp_rings[i]);
2595 	return -ENOMEM;
2596 }
2597 
2598 /**
2599  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2600  * @vsi: VSI to set the bpf prog on
2601  * @prog: the bpf prog pointer
2602  */
2603 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2604 {
2605 	struct bpf_prog *old_prog;
2606 	int i;
2607 
2608 	old_prog = xchg(&vsi->xdp_prog, prog);
2609 	if (old_prog)
2610 		bpf_prog_put(old_prog);
2611 
2612 	ice_for_each_rxq(vsi, i)
2613 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2614 }
2615 
2616 /**
2617  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2618  * @vsi: VSI to bring up Tx rings used by XDP
2619  * @prog: bpf program that will be assigned to VSI
2620  *
2621  * Return 0 on success and negative value on error
2622  */
2623 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2624 {
2625 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2626 	int xdp_rings_rem = vsi->num_xdp_txq;
2627 	struct ice_pf *pf = vsi->back;
2628 	struct ice_qs_cfg xdp_qs_cfg = {
2629 		.qs_mutex = &pf->avail_q_mutex,
2630 		.pf_map = pf->avail_txqs,
2631 		.pf_map_size = pf->max_pf_txqs,
2632 		.q_count = vsi->num_xdp_txq,
2633 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2634 		.vsi_map = vsi->txq_map,
2635 		.vsi_map_offset = vsi->alloc_txq,
2636 		.mapping_mode = ICE_VSI_MAP_CONTIG
2637 	};
2638 	struct device *dev;
2639 	int i, v_idx;
2640 	int status;
2641 
2642 	dev = ice_pf_to_dev(pf);
2643 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2644 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2645 	if (!vsi->xdp_rings)
2646 		return -ENOMEM;
2647 
2648 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2649 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2650 		goto err_map_xdp;
2651 
2652 	if (static_key_enabled(&ice_xdp_locking_key))
2653 		netdev_warn(vsi->netdev,
2654 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2655 
2656 	if (ice_xdp_alloc_setup_rings(vsi))
2657 		goto clear_xdp_rings;
2658 
2659 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2660 	ice_for_each_q_vector(vsi, v_idx) {
2661 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2662 		int xdp_rings_per_v, q_id, q_base;
2663 
2664 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2665 					       vsi->num_q_vectors - v_idx);
2666 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2667 
2668 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2669 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2670 
2671 			xdp_ring->q_vector = q_vector;
2672 			xdp_ring->next = q_vector->tx.tx_ring;
2673 			q_vector->tx.tx_ring = xdp_ring;
2674 		}
2675 		xdp_rings_rem -= xdp_rings_per_v;
2676 	}
2677 
2678 	ice_for_each_rxq(vsi, i) {
2679 		if (static_key_enabled(&ice_xdp_locking_key)) {
2680 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2681 		} else {
2682 			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2683 			struct ice_tx_ring *ring;
2684 
2685 			ice_for_each_tx_ring(ring, q_vector->tx) {
2686 				if (ice_ring_is_xdp(ring)) {
2687 					vsi->rx_rings[i]->xdp_ring = ring;
2688 					break;
2689 				}
2690 			}
2691 		}
2692 		ice_tx_xsk_pool(vsi, i);
2693 	}
2694 
2695 	/* omit the scheduler update if in reset path; XDP queues will be
2696 	 * taken into account at the end of ice_vsi_rebuild, where
2697 	 * ice_cfg_vsi_lan is being called
2698 	 */
2699 	if (ice_is_reset_in_progress(pf->state))
2700 		return 0;
2701 
2702 	/* tell the Tx scheduler that right now we have
2703 	 * additional queues
2704 	 */
2705 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2706 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2707 
2708 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2709 				 max_txqs);
2710 	if (status) {
2711 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2712 			status);
2713 		goto clear_xdp_rings;
2714 	}
2715 
2716 	/* assign the prog only when it's not already present on VSI;
2717 	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2718 	 * VSI rebuild that happens under ethtool -L can expose us to
2719 	 * the bpf_prog refcount issues as we would be swapping same
2720 	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2721 	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2722 	 * this is not harmful as dev_xdp_install bumps the refcount
2723 	 * before calling the op exposed by the driver;
2724 	 */
2725 	if (!ice_is_xdp_ena_vsi(vsi))
2726 		ice_vsi_assign_bpf_prog(vsi, prog);
2727 
2728 	return 0;
2729 clear_xdp_rings:
2730 	ice_for_each_xdp_txq(vsi, i)
2731 		if (vsi->xdp_rings[i]) {
2732 			kfree_rcu(vsi->xdp_rings[i], rcu);
2733 			vsi->xdp_rings[i] = NULL;
2734 		}
2735 
2736 err_map_xdp:
2737 	mutex_lock(&pf->avail_q_mutex);
2738 	ice_for_each_xdp_txq(vsi, i) {
2739 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2740 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2741 	}
2742 	mutex_unlock(&pf->avail_q_mutex);
2743 
2744 	devm_kfree(dev, vsi->xdp_rings);
2745 	return -ENOMEM;
2746 }
2747 
2748 /**
2749  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2750  * @vsi: VSI to remove XDP rings
2751  *
2752  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2753  * resources
2754  */
2755 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2756 {
2757 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2758 	struct ice_pf *pf = vsi->back;
2759 	int i, v_idx;
2760 
2761 	/* q_vectors are freed in reset path so there's no point in detaching
2762 	 * rings; in case of rebuild being triggered not from reset bits
2763 	 * in pf->state won't be set, so additionally check first q_vector
2764 	 * against NULL
2765 	 */
2766 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2767 		goto free_qmap;
2768 
2769 	ice_for_each_q_vector(vsi, v_idx) {
2770 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2771 		struct ice_tx_ring *ring;
2772 
2773 		ice_for_each_tx_ring(ring, q_vector->tx)
2774 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2775 				break;
2776 
2777 		/* restore the value of last node prior to XDP setup */
2778 		q_vector->tx.tx_ring = ring;
2779 	}
2780 
2781 free_qmap:
2782 	mutex_lock(&pf->avail_q_mutex);
2783 	ice_for_each_xdp_txq(vsi, i) {
2784 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2785 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2786 	}
2787 	mutex_unlock(&pf->avail_q_mutex);
2788 
2789 	ice_for_each_xdp_txq(vsi, i)
2790 		if (vsi->xdp_rings[i]) {
2791 			if (vsi->xdp_rings[i]->desc) {
2792 				synchronize_rcu();
2793 				ice_free_tx_ring(vsi->xdp_rings[i]);
2794 			}
2795 			kfree_rcu(vsi->xdp_rings[i], rcu);
2796 			vsi->xdp_rings[i] = NULL;
2797 		}
2798 
2799 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2800 	vsi->xdp_rings = NULL;
2801 
2802 	if (static_key_enabled(&ice_xdp_locking_key))
2803 		static_branch_dec(&ice_xdp_locking_key);
2804 
2805 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2806 		return 0;
2807 
2808 	ice_vsi_assign_bpf_prog(vsi, NULL);
2809 
2810 	/* notify Tx scheduler that we destroyed XDP queues and bring
2811 	 * back the old number of child nodes
2812 	 */
2813 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2814 		max_txqs[i] = vsi->num_txq;
2815 
2816 	/* change number of XDP Tx queues to 0 */
2817 	vsi->num_xdp_txq = 0;
2818 
2819 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2820 			       max_txqs);
2821 }
2822 
2823 /**
2824  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2825  * @vsi: VSI to schedule napi on
2826  */
2827 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2828 {
2829 	int i;
2830 
2831 	ice_for_each_rxq(vsi, i) {
2832 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2833 
2834 		if (rx_ring->xsk_pool)
2835 			napi_schedule(&rx_ring->q_vector->napi);
2836 	}
2837 }
2838 
2839 /**
2840  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2841  * @vsi: VSI to determine the count of XDP Tx qs
2842  *
2843  * returns 0 if Tx qs count is higher than at least half of CPU count,
2844  * -ENOMEM otherwise
2845  */
2846 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2847 {
2848 	u16 avail = ice_get_avail_txq_count(vsi->back);
2849 	u16 cpus = num_possible_cpus();
2850 
2851 	if (avail < cpus / 2)
2852 		return -ENOMEM;
2853 
2854 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2855 
2856 	if (vsi->num_xdp_txq < cpus)
2857 		static_branch_inc(&ice_xdp_locking_key);
2858 
2859 	return 0;
2860 }
2861 
2862 /**
2863  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2864  * @vsi: VSI to setup XDP for
2865  * @prog: XDP program
2866  * @extack: netlink extended ack
2867  */
2868 static int
2869 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2870 		   struct netlink_ext_ack *extack)
2871 {
2872 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2873 	bool if_running = netif_running(vsi->netdev);
2874 	int ret = 0, xdp_ring_err = 0;
2875 
2876 	if (frame_size > vsi->rx_buf_len) {
2877 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2878 		return -EOPNOTSUPP;
2879 	}
2880 
2881 	/* need to stop netdev while setting up the program for Rx rings */
2882 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2883 		ret = ice_down(vsi);
2884 		if (ret) {
2885 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2886 			return ret;
2887 		}
2888 	}
2889 
2890 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2891 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2892 		if (xdp_ring_err) {
2893 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2894 		} else {
2895 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2896 			if (xdp_ring_err)
2897 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2898 		}
2899 		/* reallocate Rx queues that are used for zero-copy */
2900 		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2901 		if (xdp_ring_err)
2902 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2903 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2904 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2905 		if (xdp_ring_err)
2906 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2907 		/* reallocate Rx queues that were used for zero-copy */
2908 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2909 		if (xdp_ring_err)
2910 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2911 	} else {
2912 		/* safe to call even when prog == vsi->xdp_prog as
2913 		 * dev_xdp_install in net/core/dev.c incremented prog's
2914 		 * refcount so corresponding bpf_prog_put won't cause
2915 		 * underflow
2916 		 */
2917 		ice_vsi_assign_bpf_prog(vsi, prog);
2918 	}
2919 
2920 	if (if_running)
2921 		ret = ice_up(vsi);
2922 
2923 	if (!ret && prog)
2924 		ice_vsi_rx_napi_schedule(vsi);
2925 
2926 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2927 }
2928 
2929 /**
2930  * ice_xdp_safe_mode - XDP handler for safe mode
2931  * @dev: netdevice
2932  * @xdp: XDP command
2933  */
2934 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2935 			     struct netdev_bpf *xdp)
2936 {
2937 	NL_SET_ERR_MSG_MOD(xdp->extack,
2938 			   "Please provide working DDP firmware package in order to use XDP\n"
2939 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2940 	return -EOPNOTSUPP;
2941 }
2942 
2943 /**
2944  * ice_xdp - implements XDP handler
2945  * @dev: netdevice
2946  * @xdp: XDP command
2947  */
2948 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2949 {
2950 	struct ice_netdev_priv *np = netdev_priv(dev);
2951 	struct ice_vsi *vsi = np->vsi;
2952 
2953 	if (vsi->type != ICE_VSI_PF) {
2954 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2955 		return -EINVAL;
2956 	}
2957 
2958 	switch (xdp->command) {
2959 	case XDP_SETUP_PROG:
2960 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2961 	case XDP_SETUP_XSK_POOL:
2962 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2963 					  xdp->xsk.queue_id);
2964 	default:
2965 		return -EINVAL;
2966 	}
2967 }
2968 
2969 /**
2970  * ice_ena_misc_vector - enable the non-queue interrupts
2971  * @pf: board private structure
2972  */
2973 static void ice_ena_misc_vector(struct ice_pf *pf)
2974 {
2975 	struct ice_hw *hw = &pf->hw;
2976 	u32 val;
2977 
2978 	/* Disable anti-spoof detection interrupt to prevent spurious event
2979 	 * interrupts during a function reset. Anti-spoof functionally is
2980 	 * still supported.
2981 	 */
2982 	val = rd32(hw, GL_MDCK_TX_TDPU);
2983 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2984 	wr32(hw, GL_MDCK_TX_TDPU, val);
2985 
2986 	/* clear things first */
2987 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2988 	rd32(hw, PFINT_OICR);		/* read to clear */
2989 
2990 	val = (PFINT_OICR_ECC_ERR_M |
2991 	       PFINT_OICR_MAL_DETECT_M |
2992 	       PFINT_OICR_GRST_M |
2993 	       PFINT_OICR_PCI_EXCEPTION_M |
2994 	       PFINT_OICR_VFLR_M |
2995 	       PFINT_OICR_HMC_ERR_M |
2996 	       PFINT_OICR_PE_PUSH_M |
2997 	       PFINT_OICR_PE_CRITERR_M);
2998 
2999 	wr32(hw, PFINT_OICR_ENA, val);
3000 
3001 	/* SW_ITR_IDX = 0, but don't change INTENA */
3002 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
3003 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3004 }
3005 
3006 /**
3007  * ice_misc_intr - misc interrupt handler
3008  * @irq: interrupt number
3009  * @data: pointer to a q_vector
3010  */
3011 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3012 {
3013 	struct ice_pf *pf = (struct ice_pf *)data;
3014 	struct ice_hw *hw = &pf->hw;
3015 	irqreturn_t ret = IRQ_NONE;
3016 	struct device *dev;
3017 	u32 oicr, ena_mask;
3018 
3019 	dev = ice_pf_to_dev(pf);
3020 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3021 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3022 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3023 
3024 	oicr = rd32(hw, PFINT_OICR);
3025 	ena_mask = rd32(hw, PFINT_OICR_ENA);
3026 
3027 	if (oicr & PFINT_OICR_SWINT_M) {
3028 		ena_mask &= ~PFINT_OICR_SWINT_M;
3029 		pf->sw_int_count++;
3030 	}
3031 
3032 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3033 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3034 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3035 	}
3036 	if (oicr & PFINT_OICR_VFLR_M) {
3037 		/* disable any further VFLR event notifications */
3038 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3039 			u32 reg = rd32(hw, PFINT_OICR_ENA);
3040 
3041 			reg &= ~PFINT_OICR_VFLR_M;
3042 			wr32(hw, PFINT_OICR_ENA, reg);
3043 		} else {
3044 			ena_mask &= ~PFINT_OICR_VFLR_M;
3045 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3046 		}
3047 	}
3048 
3049 	if (oicr & PFINT_OICR_GRST_M) {
3050 		u32 reset;
3051 
3052 		/* we have a reset warning */
3053 		ena_mask &= ~PFINT_OICR_GRST_M;
3054 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3055 			GLGEN_RSTAT_RESET_TYPE_S;
3056 
3057 		if (reset == ICE_RESET_CORER)
3058 			pf->corer_count++;
3059 		else if (reset == ICE_RESET_GLOBR)
3060 			pf->globr_count++;
3061 		else if (reset == ICE_RESET_EMPR)
3062 			pf->empr_count++;
3063 		else
3064 			dev_dbg(dev, "Invalid reset type %d\n", reset);
3065 
3066 		/* If a reset cycle isn't already in progress, we set a bit in
3067 		 * pf->state so that the service task can start a reset/rebuild.
3068 		 */
3069 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3070 			if (reset == ICE_RESET_CORER)
3071 				set_bit(ICE_CORER_RECV, pf->state);
3072 			else if (reset == ICE_RESET_GLOBR)
3073 				set_bit(ICE_GLOBR_RECV, pf->state);
3074 			else
3075 				set_bit(ICE_EMPR_RECV, pf->state);
3076 
3077 			/* There are couple of different bits at play here.
3078 			 * hw->reset_ongoing indicates whether the hardware is
3079 			 * in reset. This is set to true when a reset interrupt
3080 			 * is received and set back to false after the driver
3081 			 * has determined that the hardware is out of reset.
3082 			 *
3083 			 * ICE_RESET_OICR_RECV in pf->state indicates
3084 			 * that a post reset rebuild is required before the
3085 			 * driver is operational again. This is set above.
3086 			 *
3087 			 * As this is the start of the reset/rebuild cycle, set
3088 			 * both to indicate that.
3089 			 */
3090 			hw->reset_ongoing = true;
3091 		}
3092 	}
3093 
3094 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3095 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3096 		ice_ptp_process_ts(pf);
3097 	}
3098 
3099 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3100 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3101 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3102 
3103 		/* Save EVENTs from GTSYN register */
3104 		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
3105 						     GLTSYN_STAT_EVENT1_M |
3106 						     GLTSYN_STAT_EVENT2_M);
3107 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3108 		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
3109 	}
3110 
3111 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3112 	if (oicr & ICE_AUX_CRIT_ERR) {
3113 		pf->oicr_err_reg |= oicr;
3114 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3115 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3116 	}
3117 
3118 	/* Report any remaining unexpected interrupts */
3119 	oicr &= ena_mask;
3120 	if (oicr) {
3121 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3122 		/* If a critical error is pending there is no choice but to
3123 		 * reset the device.
3124 		 */
3125 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3126 			    PFINT_OICR_ECC_ERR_M)) {
3127 			set_bit(ICE_PFR_REQ, pf->state);
3128 			ice_service_task_schedule(pf);
3129 		}
3130 	}
3131 	ret = IRQ_HANDLED;
3132 
3133 	ice_service_task_schedule(pf);
3134 	ice_irq_dynamic_ena(hw, NULL, NULL);
3135 
3136 	return ret;
3137 }
3138 
3139 /**
3140  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3141  * @hw: pointer to HW structure
3142  */
3143 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3144 {
3145 	/* disable Admin queue Interrupt causes */
3146 	wr32(hw, PFINT_FW_CTL,
3147 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3148 
3149 	/* disable Mailbox queue Interrupt causes */
3150 	wr32(hw, PFINT_MBX_CTL,
3151 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3152 
3153 	wr32(hw, PFINT_SB_CTL,
3154 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3155 
3156 	/* disable Control queue Interrupt causes */
3157 	wr32(hw, PFINT_OICR_CTL,
3158 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3159 
3160 	ice_flush(hw);
3161 }
3162 
3163 /**
3164  * ice_free_irq_msix_misc - Unroll misc vector setup
3165  * @pf: board private structure
3166  */
3167 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3168 {
3169 	struct ice_hw *hw = &pf->hw;
3170 
3171 	ice_dis_ctrlq_interrupts(hw);
3172 
3173 	/* disable OICR interrupt */
3174 	wr32(hw, PFINT_OICR_ENA, 0);
3175 	ice_flush(hw);
3176 
3177 	if (pf->msix_entries) {
3178 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
3179 		devm_free_irq(ice_pf_to_dev(pf),
3180 			      pf->msix_entries[pf->oicr_idx].vector, pf);
3181 	}
3182 
3183 	pf->num_avail_sw_msix += 1;
3184 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
3185 }
3186 
3187 /**
3188  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3189  * @hw: pointer to HW structure
3190  * @reg_idx: HW vector index to associate the control queue interrupts with
3191  */
3192 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3193 {
3194 	u32 val;
3195 
3196 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3197 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3198 	wr32(hw, PFINT_OICR_CTL, val);
3199 
3200 	/* enable Admin queue Interrupt causes */
3201 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3202 	       PFINT_FW_CTL_CAUSE_ENA_M);
3203 	wr32(hw, PFINT_FW_CTL, val);
3204 
3205 	/* enable Mailbox queue Interrupt causes */
3206 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3207 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3208 	wr32(hw, PFINT_MBX_CTL, val);
3209 
3210 	/* This enables Sideband queue Interrupt causes */
3211 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3212 	       PFINT_SB_CTL_CAUSE_ENA_M);
3213 	wr32(hw, PFINT_SB_CTL, val);
3214 
3215 	ice_flush(hw);
3216 }
3217 
3218 /**
3219  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3220  * @pf: board private structure
3221  *
3222  * This sets up the handler for MSIX 0, which is used to manage the
3223  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3224  * when in MSI or Legacy interrupt mode.
3225  */
3226 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3227 {
3228 	struct device *dev = ice_pf_to_dev(pf);
3229 	struct ice_hw *hw = &pf->hw;
3230 	int oicr_idx, err = 0;
3231 
3232 	if (!pf->int_name[0])
3233 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3234 			 dev_driver_string(dev), dev_name(dev));
3235 
3236 	/* Do not request IRQ but do enable OICR interrupt since settings are
3237 	 * lost during reset. Note that this function is called only during
3238 	 * rebuild path and not while reset is in progress.
3239 	 */
3240 	if (ice_is_reset_in_progress(pf->state))
3241 		goto skip_req_irq;
3242 
3243 	/* reserve one vector in irq_tracker for misc interrupts */
3244 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3245 	if (oicr_idx < 0)
3246 		return oicr_idx;
3247 
3248 	pf->num_avail_sw_msix -= 1;
3249 	pf->oicr_idx = (u16)oicr_idx;
3250 
3251 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
3252 			       ice_misc_intr, 0, pf->int_name, pf);
3253 	if (err) {
3254 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3255 			pf->int_name, err);
3256 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3257 		pf->num_avail_sw_msix += 1;
3258 		return err;
3259 	}
3260 
3261 skip_req_irq:
3262 	ice_ena_misc_vector(pf);
3263 
3264 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3265 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3266 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3267 
3268 	ice_flush(hw);
3269 	ice_irq_dynamic_ena(hw, NULL, NULL);
3270 
3271 	return 0;
3272 }
3273 
3274 /**
3275  * ice_napi_add - register NAPI handler for the VSI
3276  * @vsi: VSI for which NAPI handler is to be registered
3277  *
3278  * This function is only called in the driver's load path. Registering the NAPI
3279  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3280  * reset/rebuild, etc.)
3281  */
3282 static void ice_napi_add(struct ice_vsi *vsi)
3283 {
3284 	int v_idx;
3285 
3286 	if (!vsi->netdev)
3287 		return;
3288 
3289 	ice_for_each_q_vector(vsi, v_idx)
3290 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3291 			       ice_napi_poll, NAPI_POLL_WEIGHT);
3292 }
3293 
3294 /**
3295  * ice_set_ops - set netdev and ethtools ops for the given netdev
3296  * @netdev: netdev instance
3297  */
3298 static void ice_set_ops(struct net_device *netdev)
3299 {
3300 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3301 
3302 	if (ice_is_safe_mode(pf)) {
3303 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3304 		ice_set_ethtool_safe_mode_ops(netdev);
3305 		return;
3306 	}
3307 
3308 	netdev->netdev_ops = &ice_netdev_ops;
3309 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3310 	ice_set_ethtool_ops(netdev);
3311 }
3312 
3313 /**
3314  * ice_set_netdev_features - set features for the given netdev
3315  * @netdev: netdev instance
3316  */
3317 static void ice_set_netdev_features(struct net_device *netdev)
3318 {
3319 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3320 	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3321 	netdev_features_t csumo_features;
3322 	netdev_features_t vlano_features;
3323 	netdev_features_t dflt_features;
3324 	netdev_features_t tso_features;
3325 
3326 	if (ice_is_safe_mode(pf)) {
3327 		/* safe mode */
3328 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3329 		netdev->hw_features = netdev->features;
3330 		return;
3331 	}
3332 
3333 	dflt_features = NETIF_F_SG	|
3334 			NETIF_F_HIGHDMA	|
3335 			NETIF_F_NTUPLE	|
3336 			NETIF_F_RXHASH;
3337 
3338 	csumo_features = NETIF_F_RXCSUM	  |
3339 			 NETIF_F_IP_CSUM  |
3340 			 NETIF_F_SCTP_CRC |
3341 			 NETIF_F_IPV6_CSUM;
3342 
3343 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3344 			 NETIF_F_HW_VLAN_CTAG_TX     |
3345 			 NETIF_F_HW_VLAN_CTAG_RX;
3346 
3347 	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3348 	if (is_dvm_ena)
3349 		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3350 
3351 	tso_features = NETIF_F_TSO			|
3352 		       NETIF_F_TSO_ECN			|
3353 		       NETIF_F_TSO6			|
3354 		       NETIF_F_GSO_GRE			|
3355 		       NETIF_F_GSO_UDP_TUNNEL		|
3356 		       NETIF_F_GSO_GRE_CSUM		|
3357 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3358 		       NETIF_F_GSO_PARTIAL		|
3359 		       NETIF_F_GSO_IPXIP4		|
3360 		       NETIF_F_GSO_IPXIP6		|
3361 		       NETIF_F_GSO_UDP_L4;
3362 
3363 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3364 					NETIF_F_GSO_GRE_CSUM;
3365 	/* set features that user can change */
3366 	netdev->hw_features = dflt_features | csumo_features |
3367 			      vlano_features | tso_features;
3368 
3369 	/* add support for HW_CSUM on packets with MPLS header */
3370 	netdev->mpls_features =  NETIF_F_HW_CSUM |
3371 				 NETIF_F_TSO     |
3372 				 NETIF_F_TSO6;
3373 
3374 	/* enable features */
3375 	netdev->features |= netdev->hw_features;
3376 
3377 	netdev->hw_features |= NETIF_F_HW_TC;
3378 	netdev->hw_features |= NETIF_F_LOOPBACK;
3379 
3380 	/* encap and VLAN devices inherit default, csumo and tso features */
3381 	netdev->hw_enc_features |= dflt_features | csumo_features |
3382 				   tso_features;
3383 	netdev->vlan_features |= dflt_features | csumo_features |
3384 				 tso_features;
3385 
3386 	/* advertise support but don't enable by default since only one type of
3387 	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3388 	 * type turns on the other has to be turned off. This is enforced by the
3389 	 * ice_fix_features() ndo callback.
3390 	 */
3391 	if (is_dvm_ena)
3392 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3393 			NETIF_F_HW_VLAN_STAG_TX;
3394 }
3395 
3396 /**
3397  * ice_cfg_netdev - Allocate, configure and register a netdev
3398  * @vsi: the VSI associated with the new netdev
3399  *
3400  * Returns 0 on success, negative value on failure
3401  */
3402 static int ice_cfg_netdev(struct ice_vsi *vsi)
3403 {
3404 	struct ice_netdev_priv *np;
3405 	struct net_device *netdev;
3406 	u8 mac_addr[ETH_ALEN];
3407 
3408 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3409 				    vsi->alloc_rxq);
3410 	if (!netdev)
3411 		return -ENOMEM;
3412 
3413 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3414 	vsi->netdev = netdev;
3415 	np = netdev_priv(netdev);
3416 	np->vsi = vsi;
3417 
3418 	ice_set_netdev_features(netdev);
3419 
3420 	ice_set_ops(netdev);
3421 
3422 	if (vsi->type == ICE_VSI_PF) {
3423 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3424 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3425 		eth_hw_addr_set(netdev, mac_addr);
3426 		ether_addr_copy(netdev->perm_addr, mac_addr);
3427 	}
3428 
3429 	netdev->priv_flags |= IFF_UNICAST_FLT;
3430 
3431 	/* Setup netdev TC information */
3432 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3433 
3434 	/* setup watchdog timeout value to be 5 second */
3435 	netdev->watchdog_timeo = 5 * HZ;
3436 
3437 	netdev->min_mtu = ETH_MIN_MTU;
3438 	netdev->max_mtu = ICE_MAX_MTU;
3439 
3440 	return 0;
3441 }
3442 
3443 /**
3444  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3445  * @lut: Lookup table
3446  * @rss_table_size: Lookup table size
3447  * @rss_size: Range of queue number for hashing
3448  */
3449 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3450 {
3451 	u16 i;
3452 
3453 	for (i = 0; i < rss_table_size; i++)
3454 		lut[i] = i % rss_size;
3455 }
3456 
3457 /**
3458  * ice_pf_vsi_setup - Set up a PF VSI
3459  * @pf: board private structure
3460  * @pi: pointer to the port_info instance
3461  *
3462  * Returns pointer to the successfully allocated VSI software struct
3463  * on success, otherwise returns NULL on failure.
3464  */
3465 static struct ice_vsi *
3466 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3467 {
3468 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
3469 }
3470 
3471 static struct ice_vsi *
3472 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3473 		   struct ice_channel *ch)
3474 {
3475 	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
3476 }
3477 
3478 /**
3479  * ice_ctrl_vsi_setup - Set up a control VSI
3480  * @pf: board private structure
3481  * @pi: pointer to the port_info instance
3482  *
3483  * Returns pointer to the successfully allocated VSI software struct
3484  * on success, otherwise returns NULL on failure.
3485  */
3486 static struct ice_vsi *
3487 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3488 {
3489 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
3490 }
3491 
3492 /**
3493  * ice_lb_vsi_setup - Set up a loopback VSI
3494  * @pf: board private structure
3495  * @pi: pointer to the port_info instance
3496  *
3497  * Returns pointer to the successfully allocated VSI software struct
3498  * on success, otherwise returns NULL on failure.
3499  */
3500 struct ice_vsi *
3501 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3502 {
3503 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
3504 }
3505 
3506 /**
3507  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3508  * @netdev: network interface to be adjusted
3509  * @proto: VLAN TPID
3510  * @vid: VLAN ID to be added
3511  *
3512  * net_device_ops implementation for adding VLAN IDs
3513  */
3514 static int
3515 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3516 {
3517 	struct ice_netdev_priv *np = netdev_priv(netdev);
3518 	struct ice_vsi_vlan_ops *vlan_ops;
3519 	struct ice_vsi *vsi = np->vsi;
3520 	struct ice_vlan vlan;
3521 	int ret;
3522 
3523 	/* VLAN 0 is added by default during load/reset */
3524 	if (!vid)
3525 		return 0;
3526 
3527 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3528 		usleep_range(1000, 2000);
3529 
3530 	/* Add multicast promisc rule for the VLAN ID to be added if
3531 	 * all-multicast is currently enabled.
3532 	 */
3533 	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3534 		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3535 					       ICE_MCAST_VLAN_PROMISC_BITS,
3536 					       vid);
3537 		if (ret)
3538 			goto finish;
3539 	}
3540 
3541 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3542 
3543 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3544 	 * packets aren't pruned by the device's internal switch on Rx
3545 	 */
3546 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3547 	ret = vlan_ops->add_vlan(vsi, &vlan);
3548 	if (ret)
3549 		goto finish;
3550 
3551 	/* If all-multicast is currently enabled and this VLAN ID is only one
3552 	 * besides VLAN-0 we have to update look-up type of multicast promisc
3553 	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3554 	 */
3555 	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3556 	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3557 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3558 					   ICE_MCAST_PROMISC_BITS, 0);
3559 		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3560 					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3561 	}
3562 
3563 finish:
3564 	clear_bit(ICE_CFG_BUSY, vsi->state);
3565 
3566 	return ret;
3567 }
3568 
3569 /**
3570  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3571  * @netdev: network interface to be adjusted
3572  * @proto: VLAN TPID
3573  * @vid: VLAN ID to be removed
3574  *
3575  * net_device_ops implementation for removing VLAN IDs
3576  */
3577 static int
3578 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3579 {
3580 	struct ice_netdev_priv *np = netdev_priv(netdev);
3581 	struct ice_vsi_vlan_ops *vlan_ops;
3582 	struct ice_vsi *vsi = np->vsi;
3583 	struct ice_vlan vlan;
3584 	int ret;
3585 
3586 	/* don't allow removal of VLAN 0 */
3587 	if (!vid)
3588 		return 0;
3589 
3590 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3591 		usleep_range(1000, 2000);
3592 
3593 	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3594 				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3595 	if (ret) {
3596 		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3597 			   vsi->vsi_num);
3598 		vsi->current_netdev_flags |= IFF_ALLMULTI;
3599 	}
3600 
3601 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3602 
3603 	/* Make sure VLAN delete is successful before updating VLAN
3604 	 * information
3605 	 */
3606 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3607 	ret = vlan_ops->del_vlan(vsi, &vlan);
3608 	if (ret)
3609 		goto finish;
3610 
3611 	/* Remove multicast promisc rule for the removed VLAN ID if
3612 	 * all-multicast is enabled.
3613 	 */
3614 	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3615 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3616 					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3617 
3618 	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3619 		/* Update look-up type of multicast promisc rule for VLAN 0
3620 		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3621 		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3622 		 */
3623 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3624 			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3625 						   ICE_MCAST_VLAN_PROMISC_BITS,
3626 						   0);
3627 			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3628 						 ICE_MCAST_PROMISC_BITS, 0);
3629 		}
3630 	}
3631 
3632 finish:
3633 	clear_bit(ICE_CFG_BUSY, vsi->state);
3634 
3635 	return ret;
3636 }
3637 
3638 /**
3639  * ice_rep_indr_tc_block_unbind
3640  * @cb_priv: indirection block private data
3641  */
3642 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3643 {
3644 	struct ice_indr_block_priv *indr_priv = cb_priv;
3645 
3646 	list_del(&indr_priv->list);
3647 	kfree(indr_priv);
3648 }
3649 
3650 /**
3651  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3652  * @vsi: VSI struct which has the netdev
3653  */
3654 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3655 {
3656 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3657 
3658 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3659 				 ice_rep_indr_tc_block_unbind);
3660 }
3661 
3662 /**
3663  * ice_tc_indir_block_remove - clean indirect TC block notifications
3664  * @pf: PF structure
3665  */
3666 static void ice_tc_indir_block_remove(struct ice_pf *pf)
3667 {
3668 	struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3669 
3670 	if (!pf_vsi)
3671 		return;
3672 
3673 	ice_tc_indir_block_unregister(pf_vsi);
3674 }
3675 
3676 /**
3677  * ice_tc_indir_block_register - Register TC indirect block notifications
3678  * @vsi: VSI struct which has the netdev
3679  *
3680  * Returns 0 on success, negative value on failure
3681  */
3682 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3683 {
3684 	struct ice_netdev_priv *np;
3685 
3686 	if (!vsi || !vsi->netdev)
3687 		return -EINVAL;
3688 
3689 	np = netdev_priv(vsi->netdev);
3690 
3691 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3692 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3693 }
3694 
3695 /**
3696  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3697  * @pf: board private structure
3698  *
3699  * Returns 0 on success, negative value on failure
3700  */
3701 static int ice_setup_pf_sw(struct ice_pf *pf)
3702 {
3703 	struct device *dev = ice_pf_to_dev(pf);
3704 	bool dvm = ice_is_dvm_ena(&pf->hw);
3705 	struct ice_vsi *vsi;
3706 	int status;
3707 
3708 	if (ice_is_reset_in_progress(pf->state))
3709 		return -EBUSY;
3710 
3711 	status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
3712 	if (status)
3713 		return -EIO;
3714 
3715 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3716 	if (!vsi)
3717 		return -ENOMEM;
3718 
3719 	/* init channel list */
3720 	INIT_LIST_HEAD(&vsi->ch_list);
3721 
3722 	status = ice_cfg_netdev(vsi);
3723 	if (status)
3724 		goto unroll_vsi_setup;
3725 	/* netdev has to be configured before setting frame size */
3726 	ice_vsi_cfg_frame_size(vsi);
3727 
3728 	/* init indirect block notifications */
3729 	status = ice_tc_indir_block_register(vsi);
3730 	if (status) {
3731 		dev_err(dev, "Failed to register netdev notifier\n");
3732 		goto unroll_cfg_netdev;
3733 	}
3734 
3735 	/* Setup DCB netlink interface */
3736 	ice_dcbnl_setup(vsi);
3737 
3738 	/* registering the NAPI handler requires both the queues and
3739 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3740 	 * and ice_cfg_netdev() respectively
3741 	 */
3742 	ice_napi_add(vsi);
3743 
3744 	status = ice_init_mac_fltr(pf);
3745 	if (status)
3746 		goto unroll_napi_add;
3747 
3748 	return 0;
3749 
3750 unroll_napi_add:
3751 	ice_tc_indir_block_unregister(vsi);
3752 unroll_cfg_netdev:
3753 	if (vsi) {
3754 		ice_napi_del(vsi);
3755 		if (vsi->netdev) {
3756 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3757 			free_netdev(vsi->netdev);
3758 			vsi->netdev = NULL;
3759 		}
3760 	}
3761 
3762 unroll_vsi_setup:
3763 	ice_vsi_release(vsi);
3764 	return status;
3765 }
3766 
3767 /**
3768  * ice_get_avail_q_count - Get count of queues in use
3769  * @pf_qmap: bitmap to get queue use count from
3770  * @lock: pointer to a mutex that protects access to pf_qmap
3771  * @size: size of the bitmap
3772  */
3773 static u16
3774 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3775 {
3776 	unsigned long bit;
3777 	u16 count = 0;
3778 
3779 	mutex_lock(lock);
3780 	for_each_clear_bit(bit, pf_qmap, size)
3781 		count++;
3782 	mutex_unlock(lock);
3783 
3784 	return count;
3785 }
3786 
3787 /**
3788  * ice_get_avail_txq_count - Get count of Tx queues in use
3789  * @pf: pointer to an ice_pf instance
3790  */
3791 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3792 {
3793 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3794 				     pf->max_pf_txqs);
3795 }
3796 
3797 /**
3798  * ice_get_avail_rxq_count - Get count of Rx queues in use
3799  * @pf: pointer to an ice_pf instance
3800  */
3801 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3802 {
3803 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3804 				     pf->max_pf_rxqs);
3805 }
3806 
3807 /**
3808  * ice_deinit_pf - Unrolls initialziations done by ice_init_pf
3809  * @pf: board private structure to initialize
3810  */
3811 static void ice_deinit_pf(struct ice_pf *pf)
3812 {
3813 	ice_service_task_stop(pf);
3814 	mutex_destroy(&pf->adev_mutex);
3815 	mutex_destroy(&pf->sw_mutex);
3816 	mutex_destroy(&pf->tc_mutex);
3817 	mutex_destroy(&pf->avail_q_mutex);
3818 	mutex_destroy(&pf->vfs.table_lock);
3819 
3820 	if (pf->avail_txqs) {
3821 		bitmap_free(pf->avail_txqs);
3822 		pf->avail_txqs = NULL;
3823 	}
3824 
3825 	if (pf->avail_rxqs) {
3826 		bitmap_free(pf->avail_rxqs);
3827 		pf->avail_rxqs = NULL;
3828 	}
3829 
3830 	if (pf->ptp.clock)
3831 		ptp_clock_unregister(pf->ptp.clock);
3832 }
3833 
3834 /**
3835  * ice_set_pf_caps - set PFs capability flags
3836  * @pf: pointer to the PF instance
3837  */
3838 static void ice_set_pf_caps(struct ice_pf *pf)
3839 {
3840 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3841 
3842 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3843 	if (func_caps->common_cap.rdma)
3844 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3845 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3846 	if (func_caps->common_cap.dcb)
3847 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3848 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3849 	if (func_caps->common_cap.sr_iov_1_1) {
3850 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3851 		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3852 					      ICE_MAX_SRIOV_VFS);
3853 	}
3854 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3855 	if (func_caps->common_cap.rss_table_size)
3856 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3857 
3858 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3859 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3860 		u16 unused;
3861 
3862 		/* ctrl_vsi_idx will be set to a valid value when flow director
3863 		 * is setup by ice_init_fdir
3864 		 */
3865 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3866 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3867 		/* force guaranteed filter pool for PF */
3868 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3869 				       func_caps->fd_fltr_guar);
3870 		/* force shared filter pool for PF */
3871 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3872 				       func_caps->fd_fltr_best_effort);
3873 	}
3874 
3875 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3876 	if (func_caps->common_cap.ieee_1588)
3877 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3878 
3879 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3880 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3881 }
3882 
3883 /**
3884  * ice_init_pf - Initialize general software structures (struct ice_pf)
3885  * @pf: board private structure to initialize
3886  */
3887 static int ice_init_pf(struct ice_pf *pf)
3888 {
3889 	ice_set_pf_caps(pf);
3890 
3891 	mutex_init(&pf->sw_mutex);
3892 	mutex_init(&pf->tc_mutex);
3893 	mutex_init(&pf->adev_mutex);
3894 
3895 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3896 	spin_lock_init(&pf->aq_wait_lock);
3897 	init_waitqueue_head(&pf->aq_wait_queue);
3898 
3899 	init_waitqueue_head(&pf->reset_wait_queue);
3900 
3901 	/* setup service timer and periodic service task */
3902 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3903 	pf->serv_tmr_period = HZ;
3904 	INIT_WORK(&pf->serv_task, ice_service_task);
3905 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3906 
3907 	mutex_init(&pf->avail_q_mutex);
3908 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3909 	if (!pf->avail_txqs)
3910 		return -ENOMEM;
3911 
3912 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3913 	if (!pf->avail_rxqs) {
3914 		bitmap_free(pf->avail_txqs);
3915 		pf->avail_txqs = NULL;
3916 		return -ENOMEM;
3917 	}
3918 
3919 	mutex_init(&pf->vfs.table_lock);
3920 	hash_init(pf->vfs.table);
3921 
3922 	return 0;
3923 }
3924 
3925 /**
3926  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3927  * @pf: board private structure
3928  *
3929  * compute the number of MSIX vectors required (v_budget) and request from
3930  * the OS. Return the number of vectors reserved or negative on failure
3931  */
3932 static int ice_ena_msix_range(struct ice_pf *pf)
3933 {
3934 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3935 	struct device *dev = ice_pf_to_dev(pf);
3936 	int needed, err, i;
3937 
3938 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3939 	num_cpus = num_online_cpus();
3940 
3941 	/* reserve for LAN miscellaneous handler */
3942 	needed = ICE_MIN_LAN_OICR_MSIX;
3943 	if (v_left < needed)
3944 		goto no_hw_vecs_left_err;
3945 	v_budget += needed;
3946 	v_left -= needed;
3947 
3948 	/* reserve for flow director */
3949 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3950 		needed = ICE_FDIR_MSIX;
3951 		if (v_left < needed)
3952 			goto no_hw_vecs_left_err;
3953 		v_budget += needed;
3954 		v_left -= needed;
3955 	}
3956 
3957 	/* reserve for switchdev */
3958 	needed = ICE_ESWITCH_MSIX;
3959 	if (v_left < needed)
3960 		goto no_hw_vecs_left_err;
3961 	v_budget += needed;
3962 	v_left -= needed;
3963 
3964 	/* total used for non-traffic vectors */
3965 	v_other = v_budget;
3966 
3967 	/* reserve vectors for LAN traffic */
3968 	needed = num_cpus;
3969 	if (v_left < needed)
3970 		goto no_hw_vecs_left_err;
3971 	pf->num_lan_msix = needed;
3972 	v_budget += needed;
3973 	v_left -= needed;
3974 
3975 	/* reserve vectors for RDMA auxiliary driver */
3976 	if (ice_is_rdma_ena(pf)) {
3977 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3978 		if (v_left < needed)
3979 			goto no_hw_vecs_left_err;
3980 		pf->num_rdma_msix = needed;
3981 		v_budget += needed;
3982 		v_left -= needed;
3983 	}
3984 
3985 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3986 					sizeof(*pf->msix_entries), GFP_KERNEL);
3987 	if (!pf->msix_entries) {
3988 		err = -ENOMEM;
3989 		goto exit_err;
3990 	}
3991 
3992 	for (i = 0; i < v_budget; i++)
3993 		pf->msix_entries[i].entry = i;
3994 
3995 	/* actually reserve the vectors */
3996 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3997 					 ICE_MIN_MSIX, v_budget);
3998 	if (v_actual < 0) {
3999 		dev_err(dev, "unable to reserve MSI-X vectors\n");
4000 		err = v_actual;
4001 		goto msix_err;
4002 	}
4003 
4004 	if (v_actual < v_budget) {
4005 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
4006 			 v_budget, v_actual);
4007 
4008 		if (v_actual < ICE_MIN_MSIX) {
4009 			/* error if we can't get minimum vectors */
4010 			pci_disable_msix(pf->pdev);
4011 			err = -ERANGE;
4012 			goto msix_err;
4013 		} else {
4014 			int v_remain = v_actual - v_other;
4015 			int v_rdma = 0, v_min_rdma = 0;
4016 
4017 			if (ice_is_rdma_ena(pf)) {
4018 				/* Need at least 1 interrupt in addition to
4019 				 * AEQ MSIX
4020 				 */
4021 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
4022 				v_min_rdma = ICE_MIN_RDMA_MSIX;
4023 			}
4024 
4025 			if (v_actual == ICE_MIN_MSIX ||
4026 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
4027 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
4028 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4029 
4030 				pf->num_rdma_msix = 0;
4031 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
4032 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
4033 				   (v_remain - v_rdma < v_rdma)) {
4034 				/* Support minimum RDMA and give remaining
4035 				 * vectors to LAN MSIX
4036 				 */
4037 				pf->num_rdma_msix = v_min_rdma;
4038 				pf->num_lan_msix = v_remain - v_min_rdma;
4039 			} else {
4040 				/* Split remaining MSIX with RDMA after
4041 				 * accounting for AEQ MSIX
4042 				 */
4043 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
4044 						    ICE_RDMA_NUM_AEQ_MSIX;
4045 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
4046 			}
4047 
4048 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
4049 				   pf->num_lan_msix);
4050 
4051 			if (ice_is_rdma_ena(pf))
4052 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
4053 					   pf->num_rdma_msix);
4054 		}
4055 	}
4056 
4057 	return v_actual;
4058 
4059 msix_err:
4060 	devm_kfree(dev, pf->msix_entries);
4061 	goto exit_err;
4062 
4063 no_hw_vecs_left_err:
4064 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
4065 		needed, v_left);
4066 	err = -ERANGE;
4067 exit_err:
4068 	pf->num_rdma_msix = 0;
4069 	pf->num_lan_msix = 0;
4070 	return err;
4071 }
4072 
4073 /**
4074  * ice_dis_msix - Disable MSI-X interrupt setup in OS
4075  * @pf: board private structure
4076  */
4077 static void ice_dis_msix(struct ice_pf *pf)
4078 {
4079 	pci_disable_msix(pf->pdev);
4080 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
4081 	pf->msix_entries = NULL;
4082 }
4083 
4084 /**
4085  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
4086  * @pf: board private structure
4087  */
4088 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
4089 {
4090 	ice_dis_msix(pf);
4091 
4092 	if (pf->irq_tracker) {
4093 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
4094 		pf->irq_tracker = NULL;
4095 	}
4096 }
4097 
4098 /**
4099  * ice_init_interrupt_scheme - Determine proper interrupt scheme
4100  * @pf: board private structure to initialize
4101  */
4102 static int ice_init_interrupt_scheme(struct ice_pf *pf)
4103 {
4104 	int vectors;
4105 
4106 	vectors = ice_ena_msix_range(pf);
4107 
4108 	if (vectors < 0)
4109 		return vectors;
4110 
4111 	/* set up vector assignment tracking */
4112 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
4113 				       struct_size(pf->irq_tracker, list, vectors),
4114 				       GFP_KERNEL);
4115 	if (!pf->irq_tracker) {
4116 		ice_dis_msix(pf);
4117 		return -ENOMEM;
4118 	}
4119 
4120 	/* populate SW interrupts pool with number of OS granted IRQs. */
4121 	pf->num_avail_sw_msix = (u16)vectors;
4122 	pf->irq_tracker->num_entries = (u16)vectors;
4123 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
4124 
4125 	return 0;
4126 }
4127 
4128 /**
4129  * ice_is_wol_supported - check if WoL is supported
4130  * @hw: pointer to hardware info
4131  *
4132  * Check if WoL is supported based on the HW configuration.
4133  * Returns true if NVM supports and enables WoL for this port, false otherwise
4134  */
4135 bool ice_is_wol_supported(struct ice_hw *hw)
4136 {
4137 	u16 wol_ctrl;
4138 
4139 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4140 	 * word) indicates WoL is not supported on the corresponding PF ID.
4141 	 */
4142 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4143 		return false;
4144 
4145 	return !(BIT(hw->port_info->lport) & wol_ctrl);
4146 }
4147 
4148 /**
4149  * ice_vsi_recfg_qs - Change the number of queues on a VSI
4150  * @vsi: VSI being changed
4151  * @new_rx: new number of Rx queues
4152  * @new_tx: new number of Tx queues
4153  *
4154  * Only change the number of queues if new_tx, or new_rx is non-0.
4155  *
4156  * Returns 0 on success.
4157  */
4158 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
4159 {
4160 	struct ice_pf *pf = vsi->back;
4161 	int err = 0, timeout = 50;
4162 
4163 	if (!new_rx && !new_tx)
4164 		return -EINVAL;
4165 
4166 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4167 		timeout--;
4168 		if (!timeout)
4169 			return -EBUSY;
4170 		usleep_range(1000, 2000);
4171 	}
4172 
4173 	if (new_tx)
4174 		vsi->req_txq = (u16)new_tx;
4175 	if (new_rx)
4176 		vsi->req_rxq = (u16)new_rx;
4177 
4178 	/* set for the next time the netdev is started */
4179 	if (!netif_running(vsi->netdev)) {
4180 		ice_vsi_rebuild(vsi, false);
4181 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4182 		goto done;
4183 	}
4184 
4185 	ice_vsi_close(vsi);
4186 	ice_vsi_rebuild(vsi, false);
4187 	ice_pf_dcb_recfg(pf);
4188 	ice_vsi_open(vsi);
4189 done:
4190 	clear_bit(ICE_CFG_BUSY, pf->state);
4191 	return err;
4192 }
4193 
4194 /**
4195  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4196  * @pf: PF to configure
4197  *
4198  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4199  * VSI can still Tx/Rx VLAN tagged packets.
4200  */
4201 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4202 {
4203 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4204 	struct ice_vsi_ctx *ctxt;
4205 	struct ice_hw *hw;
4206 	int status;
4207 
4208 	if (!vsi)
4209 		return;
4210 
4211 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4212 	if (!ctxt)
4213 		return;
4214 
4215 	hw = &pf->hw;
4216 	ctxt->info = vsi->info;
4217 
4218 	ctxt->info.valid_sections =
4219 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4220 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4221 			    ICE_AQ_VSI_PROP_SW_VALID);
4222 
4223 	/* disable VLAN anti-spoof */
4224 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4225 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4226 
4227 	/* disable VLAN pruning and keep all other settings */
4228 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4229 
4230 	/* allow all VLANs on Tx and don't strip on Rx */
4231 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4232 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4233 
4234 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4235 	if (status) {
4236 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4237 			status, ice_aq_str(hw->adminq.sq_last_status));
4238 	} else {
4239 		vsi->info.sec_flags = ctxt->info.sec_flags;
4240 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4241 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4242 	}
4243 
4244 	kfree(ctxt);
4245 }
4246 
4247 /**
4248  * ice_log_pkg_init - log result of DDP package load
4249  * @hw: pointer to hardware info
4250  * @state: state of package load
4251  */
4252 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4253 {
4254 	struct ice_pf *pf = hw->back;
4255 	struct device *dev;
4256 
4257 	dev = ice_pf_to_dev(pf);
4258 
4259 	switch (state) {
4260 	case ICE_DDP_PKG_SUCCESS:
4261 		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4262 			 hw->active_pkg_name,
4263 			 hw->active_pkg_ver.major,
4264 			 hw->active_pkg_ver.minor,
4265 			 hw->active_pkg_ver.update,
4266 			 hw->active_pkg_ver.draft);
4267 		break;
4268 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4269 		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4270 			 hw->active_pkg_name,
4271 			 hw->active_pkg_ver.major,
4272 			 hw->active_pkg_ver.minor,
4273 			 hw->active_pkg_ver.update,
4274 			 hw->active_pkg_ver.draft);
4275 		break;
4276 	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4277 		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4278 			hw->active_pkg_name,
4279 			hw->active_pkg_ver.major,
4280 			hw->active_pkg_ver.minor,
4281 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4282 		break;
4283 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4284 		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4285 			 hw->active_pkg_name,
4286 			 hw->active_pkg_ver.major,
4287 			 hw->active_pkg_ver.minor,
4288 			 hw->active_pkg_ver.update,
4289 			 hw->active_pkg_ver.draft,
4290 			 hw->pkg_name,
4291 			 hw->pkg_ver.major,
4292 			 hw->pkg_ver.minor,
4293 			 hw->pkg_ver.update,
4294 			 hw->pkg_ver.draft);
4295 		break;
4296 	case ICE_DDP_PKG_FW_MISMATCH:
4297 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4298 		break;
4299 	case ICE_DDP_PKG_INVALID_FILE:
4300 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4301 		break;
4302 	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4303 		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4304 		break;
4305 	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4306 		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4307 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4308 		break;
4309 	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4310 		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4311 		break;
4312 	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4313 		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4314 		break;
4315 	case ICE_DDP_PKG_LOAD_ERROR:
4316 		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4317 		/* poll for reset to complete */
4318 		if (ice_check_reset(hw))
4319 			dev_err(dev, "Error resetting device. Please reload the driver\n");
4320 		break;
4321 	case ICE_DDP_PKG_ERR:
4322 	default:
4323 		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4324 		break;
4325 	}
4326 }
4327 
4328 /**
4329  * ice_load_pkg - load/reload the DDP Package file
4330  * @firmware: firmware structure when firmware requested or NULL for reload
4331  * @pf: pointer to the PF instance
4332  *
4333  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4334  * initialize HW tables.
4335  */
4336 static void
4337 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4338 {
4339 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4340 	struct device *dev = ice_pf_to_dev(pf);
4341 	struct ice_hw *hw = &pf->hw;
4342 
4343 	/* Load DDP Package */
4344 	if (firmware && !hw->pkg_copy) {
4345 		state = ice_copy_and_init_pkg(hw, firmware->data,
4346 					      firmware->size);
4347 		ice_log_pkg_init(hw, state);
4348 	} else if (!firmware && hw->pkg_copy) {
4349 		/* Reload package during rebuild after CORER/GLOBR reset */
4350 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4351 		ice_log_pkg_init(hw, state);
4352 	} else {
4353 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4354 	}
4355 
4356 	if (!ice_is_init_pkg_successful(state)) {
4357 		/* Safe Mode */
4358 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4359 		return;
4360 	}
4361 
4362 	/* Successful download package is the precondition for advanced
4363 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4364 	 */
4365 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4366 }
4367 
4368 /**
4369  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4370  * @pf: pointer to the PF structure
4371  *
4372  * There is no error returned here because the driver should be able to handle
4373  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4374  * specifically with Tx.
4375  */
4376 static void ice_verify_cacheline_size(struct ice_pf *pf)
4377 {
4378 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4379 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4380 			 ICE_CACHE_LINE_BYTES);
4381 }
4382 
4383 /**
4384  * ice_send_version - update firmware with driver version
4385  * @pf: PF struct
4386  *
4387  * Returns 0 on success, else error code
4388  */
4389 static int ice_send_version(struct ice_pf *pf)
4390 {
4391 	struct ice_driver_ver dv;
4392 
4393 	dv.major_ver = 0xff;
4394 	dv.minor_ver = 0xff;
4395 	dv.build_ver = 0xff;
4396 	dv.subbuild_ver = 0;
4397 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4398 		sizeof(dv.driver_string));
4399 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4400 }
4401 
4402 /**
4403  * ice_init_fdir - Initialize flow director VSI and configuration
4404  * @pf: pointer to the PF instance
4405  *
4406  * returns 0 on success, negative on error
4407  */
4408 static int ice_init_fdir(struct ice_pf *pf)
4409 {
4410 	struct device *dev = ice_pf_to_dev(pf);
4411 	struct ice_vsi *ctrl_vsi;
4412 	int err;
4413 
4414 	/* Side Band Flow Director needs to have a control VSI.
4415 	 * Allocate it and store it in the PF.
4416 	 */
4417 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4418 	if (!ctrl_vsi) {
4419 		dev_dbg(dev, "could not create control VSI\n");
4420 		return -ENOMEM;
4421 	}
4422 
4423 	err = ice_vsi_open_ctrl(ctrl_vsi);
4424 	if (err) {
4425 		dev_dbg(dev, "could not open control VSI\n");
4426 		goto err_vsi_open;
4427 	}
4428 
4429 	mutex_init(&pf->hw.fdir_fltr_lock);
4430 
4431 	err = ice_fdir_create_dflt_rules(pf);
4432 	if (err)
4433 		goto err_fdir_rule;
4434 
4435 	return 0;
4436 
4437 err_fdir_rule:
4438 	ice_fdir_release_flows(&pf->hw);
4439 	ice_vsi_close(ctrl_vsi);
4440 err_vsi_open:
4441 	ice_vsi_release(ctrl_vsi);
4442 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4443 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4444 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4445 	}
4446 	return err;
4447 }
4448 
4449 /**
4450  * ice_get_opt_fw_name - return optional firmware file name or NULL
4451  * @pf: pointer to the PF instance
4452  */
4453 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4454 {
4455 	/* Optional firmware name same as default with additional dash
4456 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
4457 	 */
4458 	struct pci_dev *pdev = pf->pdev;
4459 	char *opt_fw_filename;
4460 	u64 dsn;
4461 
4462 	/* Determine the name of the optional file using the DSN (two
4463 	 * dwords following the start of the DSN Capability).
4464 	 */
4465 	dsn = pci_get_dsn(pdev);
4466 	if (!dsn)
4467 		return NULL;
4468 
4469 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4470 	if (!opt_fw_filename)
4471 		return NULL;
4472 
4473 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4474 		 ICE_DDP_PKG_PATH, dsn);
4475 
4476 	return opt_fw_filename;
4477 }
4478 
4479 /**
4480  * ice_request_fw - Device initialization routine
4481  * @pf: pointer to the PF instance
4482  */
4483 static void ice_request_fw(struct ice_pf *pf)
4484 {
4485 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4486 	const struct firmware *firmware = NULL;
4487 	struct device *dev = ice_pf_to_dev(pf);
4488 	int err = 0;
4489 
4490 	/* optional device-specific DDP (if present) overrides the default DDP
4491 	 * package file. kernel logs a debug message if the file doesn't exist,
4492 	 * and warning messages for other errors.
4493 	 */
4494 	if (opt_fw_filename) {
4495 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4496 		if (err) {
4497 			kfree(opt_fw_filename);
4498 			goto dflt_pkg_load;
4499 		}
4500 
4501 		/* request for firmware was successful. Download to device */
4502 		ice_load_pkg(firmware, pf);
4503 		kfree(opt_fw_filename);
4504 		release_firmware(firmware);
4505 		return;
4506 	}
4507 
4508 dflt_pkg_load:
4509 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4510 	if (err) {
4511 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4512 		return;
4513 	}
4514 
4515 	/* request for firmware was successful. Download to device */
4516 	ice_load_pkg(firmware, pf);
4517 	release_firmware(firmware);
4518 }
4519 
4520 /**
4521  * ice_print_wake_reason - show the wake up cause in the log
4522  * @pf: pointer to the PF struct
4523  */
4524 static void ice_print_wake_reason(struct ice_pf *pf)
4525 {
4526 	u32 wus = pf->wakeup_reason;
4527 	const char *wake_str;
4528 
4529 	/* if no wake event, nothing to print */
4530 	if (!wus)
4531 		return;
4532 
4533 	if (wus & PFPM_WUS_LNKC_M)
4534 		wake_str = "Link\n";
4535 	else if (wus & PFPM_WUS_MAG_M)
4536 		wake_str = "Magic Packet\n";
4537 	else if (wus & PFPM_WUS_MNG_M)
4538 		wake_str = "Management\n";
4539 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4540 		wake_str = "Firmware Reset\n";
4541 	else
4542 		wake_str = "Unknown\n";
4543 
4544 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4545 }
4546 
4547 /**
4548  * ice_register_netdev - register netdev and devlink port
4549  * @pf: pointer to the PF struct
4550  */
4551 static int ice_register_netdev(struct ice_pf *pf)
4552 {
4553 	struct ice_vsi *vsi;
4554 	int err = 0;
4555 
4556 	vsi = ice_get_main_vsi(pf);
4557 	if (!vsi || !vsi->netdev)
4558 		return -EIO;
4559 
4560 	err = register_netdev(vsi->netdev);
4561 	if (err)
4562 		goto err_register_netdev;
4563 
4564 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4565 	netif_carrier_off(vsi->netdev);
4566 	netif_tx_stop_all_queues(vsi->netdev);
4567 	err = ice_devlink_create_pf_port(pf);
4568 	if (err)
4569 		goto err_devlink_create;
4570 
4571 	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
4572 
4573 	return 0;
4574 err_devlink_create:
4575 	unregister_netdev(vsi->netdev);
4576 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4577 err_register_netdev:
4578 	free_netdev(vsi->netdev);
4579 	vsi->netdev = NULL;
4580 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4581 	return err;
4582 }
4583 
4584 /**
4585  * ice_probe - Device initialization routine
4586  * @pdev: PCI device information struct
4587  * @ent: entry in ice_pci_tbl
4588  *
4589  * Returns 0 on success, negative on failure
4590  */
4591 static int
4592 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4593 {
4594 	struct device *dev = &pdev->dev;
4595 	struct ice_pf *pf;
4596 	struct ice_hw *hw;
4597 	int i, err;
4598 
4599 	if (pdev->is_virtfn) {
4600 		dev_err(dev, "can't probe a virtual function\n");
4601 		return -EINVAL;
4602 	}
4603 
4604 	/* this driver uses devres, see
4605 	 * Documentation/driver-api/driver-model/devres.rst
4606 	 */
4607 	err = pcim_enable_device(pdev);
4608 	if (err)
4609 		return err;
4610 
4611 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4612 	if (err) {
4613 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4614 		return err;
4615 	}
4616 
4617 	pf = ice_allocate_pf(dev);
4618 	if (!pf)
4619 		return -ENOMEM;
4620 
4621 	/* initialize Auxiliary index to invalid value */
4622 	pf->aux_idx = -1;
4623 
4624 	/* set up for high or low DMA */
4625 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4626 	if (err) {
4627 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4628 		return err;
4629 	}
4630 
4631 	pci_enable_pcie_error_reporting(pdev);
4632 	pci_set_master(pdev);
4633 
4634 	pf->pdev = pdev;
4635 	pci_set_drvdata(pdev, pf);
4636 	set_bit(ICE_DOWN, pf->state);
4637 	/* Disable service task until DOWN bit is cleared */
4638 	set_bit(ICE_SERVICE_DIS, pf->state);
4639 
4640 	hw = &pf->hw;
4641 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4642 	pci_save_state(pdev);
4643 
4644 	hw->back = pf;
4645 	hw->vendor_id = pdev->vendor;
4646 	hw->device_id = pdev->device;
4647 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4648 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4649 	hw->subsystem_device_id = pdev->subsystem_device;
4650 	hw->bus.device = PCI_SLOT(pdev->devfn);
4651 	hw->bus.func = PCI_FUNC(pdev->devfn);
4652 	ice_set_ctrlq_len(hw);
4653 
4654 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4655 
4656 #ifndef CONFIG_DYNAMIC_DEBUG
4657 	if (debug < -1)
4658 		hw->debug_mask = debug;
4659 #endif
4660 
4661 	err = ice_init_hw(hw);
4662 	if (err) {
4663 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4664 		err = -EIO;
4665 		goto err_exit_unroll;
4666 	}
4667 
4668 	ice_init_feature_support(pf);
4669 
4670 	ice_request_fw(pf);
4671 
4672 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4673 	 * set in pf->state, which will cause ice_is_safe_mode to return
4674 	 * true
4675 	 */
4676 	if (ice_is_safe_mode(pf)) {
4677 		/* we already got function/device capabilities but these don't
4678 		 * reflect what the driver needs to do in safe mode. Instead of
4679 		 * adding conditional logic everywhere to ignore these
4680 		 * device/function capabilities, override them.
4681 		 */
4682 		ice_set_safe_mode_caps(hw);
4683 	}
4684 
4685 	hw->ucast_shared = true;
4686 
4687 	err = ice_init_pf(pf);
4688 	if (err) {
4689 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4690 		goto err_init_pf_unroll;
4691 	}
4692 
4693 	ice_devlink_init_regions(pf);
4694 
4695 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4696 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4697 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4698 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4699 	i = 0;
4700 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4701 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4702 			pf->hw.tnl.valid_count[TNL_VXLAN];
4703 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4704 			UDP_TUNNEL_TYPE_VXLAN;
4705 		i++;
4706 	}
4707 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4708 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4709 			pf->hw.tnl.valid_count[TNL_GENEVE];
4710 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4711 			UDP_TUNNEL_TYPE_GENEVE;
4712 		i++;
4713 	}
4714 
4715 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4716 	if (!pf->num_alloc_vsi) {
4717 		err = -EIO;
4718 		goto err_init_pf_unroll;
4719 	}
4720 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4721 		dev_warn(&pf->pdev->dev,
4722 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4723 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4724 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4725 	}
4726 
4727 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4728 			       GFP_KERNEL);
4729 	if (!pf->vsi) {
4730 		err = -ENOMEM;
4731 		goto err_init_pf_unroll;
4732 	}
4733 
4734 	err = ice_init_interrupt_scheme(pf);
4735 	if (err) {
4736 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4737 		err = -EIO;
4738 		goto err_init_vsi_unroll;
4739 	}
4740 
4741 	/* In case of MSIX we are going to setup the misc vector right here
4742 	 * to handle admin queue events etc. In case of legacy and MSI
4743 	 * the misc functionality and queue processing is combined in
4744 	 * the same vector and that gets setup at open.
4745 	 */
4746 	err = ice_req_irq_msix_misc(pf);
4747 	if (err) {
4748 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4749 		goto err_init_interrupt_unroll;
4750 	}
4751 
4752 	/* create switch struct for the switch element created by FW on boot */
4753 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4754 	if (!pf->first_sw) {
4755 		err = -ENOMEM;
4756 		goto err_msix_misc_unroll;
4757 	}
4758 
4759 	if (hw->evb_veb)
4760 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4761 	else
4762 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4763 
4764 	pf->first_sw->pf = pf;
4765 
4766 	/* record the sw_id available for later use */
4767 	pf->first_sw->sw_id = hw->port_info->sw_id;
4768 
4769 	err = ice_setup_pf_sw(pf);
4770 	if (err) {
4771 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4772 		goto err_alloc_sw_unroll;
4773 	}
4774 
4775 	clear_bit(ICE_SERVICE_DIS, pf->state);
4776 
4777 	/* tell the firmware we are up */
4778 	err = ice_send_version(pf);
4779 	if (err) {
4780 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4781 			UTS_RELEASE, err);
4782 		goto err_send_version_unroll;
4783 	}
4784 
4785 	/* since everything is good, start the service timer */
4786 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4787 
4788 	err = ice_init_link_events(pf->hw.port_info);
4789 	if (err) {
4790 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4791 		goto err_send_version_unroll;
4792 	}
4793 
4794 	/* not a fatal error if this fails */
4795 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4796 	if (err)
4797 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4798 
4799 	/* not a fatal error if this fails */
4800 	err = ice_update_link_info(pf->hw.port_info);
4801 	if (err)
4802 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4803 
4804 	ice_init_link_dflt_override(pf->hw.port_info);
4805 
4806 	ice_check_link_cfg_err(pf,
4807 			       pf->hw.port_info->phy.link_info.link_cfg_err);
4808 
4809 	/* if media available, initialize PHY settings */
4810 	if (pf->hw.port_info->phy.link_info.link_info &
4811 	    ICE_AQ_MEDIA_AVAILABLE) {
4812 		/* not a fatal error if this fails */
4813 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4814 		if (err)
4815 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4816 
4817 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4818 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4819 
4820 			if (vsi)
4821 				ice_configure_phy(vsi);
4822 		}
4823 	} else {
4824 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4825 	}
4826 
4827 	ice_verify_cacheline_size(pf);
4828 
4829 	/* Save wakeup reason register for later use */
4830 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4831 
4832 	/* check for a power management event */
4833 	ice_print_wake_reason(pf);
4834 
4835 	/* clear wake status, all bits */
4836 	wr32(hw, PFPM_WUS, U32_MAX);
4837 
4838 	/* Disable WoL at init, wait for user to enable */
4839 	device_set_wakeup_enable(dev, false);
4840 
4841 	if (ice_is_safe_mode(pf)) {
4842 		ice_set_safe_mode_vlan_cfg(pf);
4843 		goto probe_done;
4844 	}
4845 
4846 	/* initialize DDP driven features */
4847 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4848 		ice_ptp_init(pf);
4849 
4850 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4851 		ice_gnss_init(pf);
4852 
4853 	/* Note: Flow director init failure is non-fatal to load */
4854 	if (ice_init_fdir(pf))
4855 		dev_err(dev, "could not initialize flow director\n");
4856 
4857 	/* Note: DCB init failure is non-fatal to load */
4858 	if (ice_init_pf_dcb(pf, false)) {
4859 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4860 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4861 	} else {
4862 		ice_cfg_lldp_mib_change(&pf->hw, true);
4863 	}
4864 
4865 	if (ice_init_lag(pf))
4866 		dev_warn(dev, "Failed to init link aggregation support\n");
4867 
4868 	/* print PCI link speed and width */
4869 	pcie_print_link_status(pf->pdev);
4870 
4871 probe_done:
4872 	err = ice_register_netdev(pf);
4873 	if (err)
4874 		goto err_netdev_reg;
4875 
4876 	err = ice_devlink_register_params(pf);
4877 	if (err)
4878 		goto err_netdev_reg;
4879 
4880 	/* ready to go, so clear down state bit */
4881 	clear_bit(ICE_DOWN, pf->state);
4882 	if (ice_is_rdma_ena(pf)) {
4883 		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4884 		if (pf->aux_idx < 0) {
4885 			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4886 			err = -ENOMEM;
4887 			goto err_devlink_reg_param;
4888 		}
4889 
4890 		err = ice_init_rdma(pf);
4891 		if (err) {
4892 			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4893 			err = -EIO;
4894 			goto err_init_aux_unroll;
4895 		}
4896 	} else {
4897 		dev_warn(dev, "RDMA is not supported on this device\n");
4898 	}
4899 
4900 	ice_devlink_register(pf);
4901 	return 0;
4902 
4903 err_init_aux_unroll:
4904 	pf->adev = NULL;
4905 	ida_free(&ice_aux_ida, pf->aux_idx);
4906 err_devlink_reg_param:
4907 	ice_devlink_unregister_params(pf);
4908 err_netdev_reg:
4909 err_send_version_unroll:
4910 	ice_vsi_release_all(pf);
4911 err_alloc_sw_unroll:
4912 	set_bit(ICE_SERVICE_DIS, pf->state);
4913 	set_bit(ICE_DOWN, pf->state);
4914 	devm_kfree(dev, pf->first_sw);
4915 err_msix_misc_unroll:
4916 	ice_free_irq_msix_misc(pf);
4917 err_init_interrupt_unroll:
4918 	ice_clear_interrupt_scheme(pf);
4919 err_init_vsi_unroll:
4920 	devm_kfree(dev, pf->vsi);
4921 err_init_pf_unroll:
4922 	ice_deinit_pf(pf);
4923 	ice_devlink_destroy_regions(pf);
4924 	ice_deinit_hw(hw);
4925 err_exit_unroll:
4926 	pci_disable_pcie_error_reporting(pdev);
4927 	pci_disable_device(pdev);
4928 	return err;
4929 }
4930 
4931 /**
4932  * ice_set_wake - enable or disable Wake on LAN
4933  * @pf: pointer to the PF struct
4934  *
4935  * Simple helper for WoL control
4936  */
4937 static void ice_set_wake(struct ice_pf *pf)
4938 {
4939 	struct ice_hw *hw = &pf->hw;
4940 	bool wol = pf->wol_ena;
4941 
4942 	/* clear wake state, otherwise new wake events won't fire */
4943 	wr32(hw, PFPM_WUS, U32_MAX);
4944 
4945 	/* enable / disable APM wake up, no RMW needed */
4946 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4947 
4948 	/* set magic packet filter enabled */
4949 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4950 }
4951 
4952 /**
4953  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4954  * @pf: pointer to the PF struct
4955  *
4956  * Issue firmware command to enable multicast magic wake, making
4957  * sure that any locally administered address (LAA) is used for
4958  * wake, and that PF reset doesn't undo the LAA.
4959  */
4960 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4961 {
4962 	struct device *dev = ice_pf_to_dev(pf);
4963 	struct ice_hw *hw = &pf->hw;
4964 	u8 mac_addr[ETH_ALEN];
4965 	struct ice_vsi *vsi;
4966 	int status;
4967 	u8 flags;
4968 
4969 	if (!pf->wol_ena)
4970 		return;
4971 
4972 	vsi = ice_get_main_vsi(pf);
4973 	if (!vsi)
4974 		return;
4975 
4976 	/* Get current MAC address in case it's an LAA */
4977 	if (vsi->netdev)
4978 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4979 	else
4980 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4981 
4982 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4983 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4984 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4985 
4986 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4987 	if (status)
4988 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
4989 			status, ice_aq_str(hw->adminq.sq_last_status));
4990 }
4991 
4992 /**
4993  * ice_remove - Device removal routine
4994  * @pdev: PCI device information struct
4995  */
4996 static void ice_remove(struct pci_dev *pdev)
4997 {
4998 	struct ice_pf *pf = pci_get_drvdata(pdev);
4999 	int i;
5000 
5001 	ice_devlink_unregister(pf);
5002 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5003 		if (!ice_is_reset_in_progress(pf->state))
5004 			break;
5005 		msleep(100);
5006 	}
5007 
5008 	ice_tc_indir_block_remove(pf);
5009 
5010 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5011 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5012 		ice_free_vfs(pf);
5013 	}
5014 
5015 	ice_service_task_stop(pf);
5016 
5017 	ice_aq_cancel_waiting_tasks(pf);
5018 	ice_unplug_aux_dev(pf);
5019 	if (pf->aux_idx >= 0)
5020 		ida_free(&ice_aux_ida, pf->aux_idx);
5021 	ice_devlink_unregister_params(pf);
5022 	set_bit(ICE_DOWN, pf->state);
5023 
5024 	ice_deinit_lag(pf);
5025 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
5026 		ice_ptp_release(pf);
5027 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
5028 		ice_gnss_exit(pf);
5029 	if (!ice_is_safe_mode(pf))
5030 		ice_remove_arfs(pf);
5031 	ice_setup_mc_magic_wake(pf);
5032 	ice_vsi_release_all(pf);
5033 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
5034 	ice_set_wake(pf);
5035 	ice_free_irq_msix_misc(pf);
5036 	ice_for_each_vsi(pf, i) {
5037 		if (!pf->vsi[i])
5038 			continue;
5039 		ice_vsi_free_q_vectors(pf->vsi[i]);
5040 	}
5041 	ice_deinit_pf(pf);
5042 	ice_devlink_destroy_regions(pf);
5043 	ice_deinit_hw(&pf->hw);
5044 
5045 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
5046 	 * do it via ice_schedule_reset() since there is no need to rebuild
5047 	 * and the service task is already stopped.
5048 	 */
5049 	ice_reset(&pf->hw, ICE_RESET_PFR);
5050 	pci_wait_for_pending_transaction(pdev);
5051 	ice_clear_interrupt_scheme(pf);
5052 	pci_disable_pcie_error_reporting(pdev);
5053 	pci_disable_device(pdev);
5054 }
5055 
5056 /**
5057  * ice_shutdown - PCI callback for shutting down device
5058  * @pdev: PCI device information struct
5059  */
5060 static void ice_shutdown(struct pci_dev *pdev)
5061 {
5062 	struct ice_pf *pf = pci_get_drvdata(pdev);
5063 
5064 	ice_remove(pdev);
5065 
5066 	if (system_state == SYSTEM_POWER_OFF) {
5067 		pci_wake_from_d3(pdev, pf->wol_ena);
5068 		pci_set_power_state(pdev, PCI_D3hot);
5069 	}
5070 }
5071 
5072 #ifdef CONFIG_PM
5073 /**
5074  * ice_prepare_for_shutdown - prep for PCI shutdown
5075  * @pf: board private structure
5076  *
5077  * Inform or close all dependent features in prep for PCI device shutdown
5078  */
5079 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5080 {
5081 	struct ice_hw *hw = &pf->hw;
5082 	u32 v;
5083 
5084 	/* Notify VFs of impending reset */
5085 	if (ice_check_sq_alive(hw, &hw->mailboxq))
5086 		ice_vc_notify_reset(pf);
5087 
5088 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5089 
5090 	/* disable the VSIs and their queues that are not already DOWN */
5091 	ice_pf_dis_all_vsi(pf, false);
5092 
5093 	ice_for_each_vsi(pf, v)
5094 		if (pf->vsi[v])
5095 			pf->vsi[v]->vsi_num = 0;
5096 
5097 	ice_shutdown_all_ctrlq(hw);
5098 }
5099 
5100 /**
5101  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5102  * @pf: board private structure to reinitialize
5103  *
5104  * This routine reinitialize interrupt scheme that was cleared during
5105  * power management suspend callback.
5106  *
5107  * This should be called during resume routine to re-allocate the q_vectors
5108  * and reacquire interrupts.
5109  */
5110 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5111 {
5112 	struct device *dev = ice_pf_to_dev(pf);
5113 	int ret, v;
5114 
5115 	/* Since we clear MSIX flag during suspend, we need to
5116 	 * set it back during resume...
5117 	 */
5118 
5119 	ret = ice_init_interrupt_scheme(pf);
5120 	if (ret) {
5121 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5122 		return ret;
5123 	}
5124 
5125 	/* Remap vectors and rings, after successful re-init interrupts */
5126 	ice_for_each_vsi(pf, v) {
5127 		if (!pf->vsi[v])
5128 			continue;
5129 
5130 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5131 		if (ret)
5132 			goto err_reinit;
5133 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5134 	}
5135 
5136 	ret = ice_req_irq_msix_misc(pf);
5137 	if (ret) {
5138 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5139 			ret);
5140 		goto err_reinit;
5141 	}
5142 
5143 	return 0;
5144 
5145 err_reinit:
5146 	while (v--)
5147 		if (pf->vsi[v])
5148 			ice_vsi_free_q_vectors(pf->vsi[v]);
5149 
5150 	return ret;
5151 }
5152 
5153 /**
5154  * ice_suspend
5155  * @dev: generic device information structure
5156  *
5157  * Power Management callback to quiesce the device and prepare
5158  * for D3 transition.
5159  */
5160 static int __maybe_unused ice_suspend(struct device *dev)
5161 {
5162 	struct pci_dev *pdev = to_pci_dev(dev);
5163 	struct ice_pf *pf;
5164 	int disabled, v;
5165 
5166 	pf = pci_get_drvdata(pdev);
5167 
5168 	if (!ice_pf_state_is_nominal(pf)) {
5169 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5170 		return -EBUSY;
5171 	}
5172 
5173 	/* Stop watchdog tasks until resume completion.
5174 	 * Even though it is most likely that the service task is
5175 	 * disabled if the device is suspended or down, the service task's
5176 	 * state is controlled by a different state bit, and we should
5177 	 * store and honor whatever state that bit is in at this point.
5178 	 */
5179 	disabled = ice_service_task_stop(pf);
5180 
5181 	ice_unplug_aux_dev(pf);
5182 
5183 	/* Already suspended?, then there is nothing to do */
5184 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5185 		if (!disabled)
5186 			ice_service_task_restart(pf);
5187 		return 0;
5188 	}
5189 
5190 	if (test_bit(ICE_DOWN, pf->state) ||
5191 	    ice_is_reset_in_progress(pf->state)) {
5192 		dev_err(dev, "can't suspend device in reset or already down\n");
5193 		if (!disabled)
5194 			ice_service_task_restart(pf);
5195 		return 0;
5196 	}
5197 
5198 	ice_setup_mc_magic_wake(pf);
5199 
5200 	ice_prepare_for_shutdown(pf);
5201 
5202 	ice_set_wake(pf);
5203 
5204 	/* Free vectors, clear the interrupt scheme and release IRQs
5205 	 * for proper hibernation, especially with large number of CPUs.
5206 	 * Otherwise hibernation might fail when mapping all the vectors back
5207 	 * to CPU0.
5208 	 */
5209 	ice_free_irq_msix_misc(pf);
5210 	ice_for_each_vsi(pf, v) {
5211 		if (!pf->vsi[v])
5212 			continue;
5213 		ice_vsi_free_q_vectors(pf->vsi[v]);
5214 	}
5215 	ice_clear_interrupt_scheme(pf);
5216 
5217 	pci_save_state(pdev);
5218 	pci_wake_from_d3(pdev, pf->wol_ena);
5219 	pci_set_power_state(pdev, PCI_D3hot);
5220 	return 0;
5221 }
5222 
5223 /**
5224  * ice_resume - PM callback for waking up from D3
5225  * @dev: generic device information structure
5226  */
5227 static int __maybe_unused ice_resume(struct device *dev)
5228 {
5229 	struct pci_dev *pdev = to_pci_dev(dev);
5230 	enum ice_reset_req reset_type;
5231 	struct ice_pf *pf;
5232 	struct ice_hw *hw;
5233 	int ret;
5234 
5235 	pci_set_power_state(pdev, PCI_D0);
5236 	pci_restore_state(pdev);
5237 	pci_save_state(pdev);
5238 
5239 	if (!pci_device_is_present(pdev))
5240 		return -ENODEV;
5241 
5242 	ret = pci_enable_device_mem(pdev);
5243 	if (ret) {
5244 		dev_err(dev, "Cannot enable device after suspend\n");
5245 		return ret;
5246 	}
5247 
5248 	pf = pci_get_drvdata(pdev);
5249 	hw = &pf->hw;
5250 
5251 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5252 	ice_print_wake_reason(pf);
5253 
5254 	/* We cleared the interrupt scheme when we suspended, so we need to
5255 	 * restore it now to resume device functionality.
5256 	 */
5257 	ret = ice_reinit_interrupt_scheme(pf);
5258 	if (ret)
5259 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5260 
5261 	clear_bit(ICE_DOWN, pf->state);
5262 	/* Now perform PF reset and rebuild */
5263 	reset_type = ICE_RESET_PFR;
5264 	/* re-enable service task for reset, but allow reset to schedule it */
5265 	clear_bit(ICE_SERVICE_DIS, pf->state);
5266 
5267 	if (ice_schedule_reset(pf, reset_type))
5268 		dev_err(dev, "Reset during resume failed.\n");
5269 
5270 	clear_bit(ICE_SUSPENDED, pf->state);
5271 	ice_service_task_restart(pf);
5272 
5273 	/* Restart the service task */
5274 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5275 
5276 	return 0;
5277 }
5278 #endif /* CONFIG_PM */
5279 
5280 /**
5281  * ice_pci_err_detected - warning that PCI error has been detected
5282  * @pdev: PCI device information struct
5283  * @err: the type of PCI error
5284  *
5285  * Called to warn that something happened on the PCI bus and the error handling
5286  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5287  */
5288 static pci_ers_result_t
5289 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5290 {
5291 	struct ice_pf *pf = pci_get_drvdata(pdev);
5292 
5293 	if (!pf) {
5294 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5295 			__func__, err);
5296 		return PCI_ERS_RESULT_DISCONNECT;
5297 	}
5298 
5299 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5300 		ice_service_task_stop(pf);
5301 
5302 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5303 			set_bit(ICE_PFR_REQ, pf->state);
5304 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5305 		}
5306 	}
5307 
5308 	return PCI_ERS_RESULT_NEED_RESET;
5309 }
5310 
5311 /**
5312  * ice_pci_err_slot_reset - a PCI slot reset has just happened
5313  * @pdev: PCI device information struct
5314  *
5315  * Called to determine if the driver can recover from the PCI slot reset by
5316  * using a register read to determine if the device is recoverable.
5317  */
5318 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5319 {
5320 	struct ice_pf *pf = pci_get_drvdata(pdev);
5321 	pci_ers_result_t result;
5322 	int err;
5323 	u32 reg;
5324 
5325 	err = pci_enable_device_mem(pdev);
5326 	if (err) {
5327 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5328 			err);
5329 		result = PCI_ERS_RESULT_DISCONNECT;
5330 	} else {
5331 		pci_set_master(pdev);
5332 		pci_restore_state(pdev);
5333 		pci_save_state(pdev);
5334 		pci_wake_from_d3(pdev, false);
5335 
5336 		/* Check for life */
5337 		reg = rd32(&pf->hw, GLGEN_RTRIG);
5338 		if (!reg)
5339 			result = PCI_ERS_RESULT_RECOVERED;
5340 		else
5341 			result = PCI_ERS_RESULT_DISCONNECT;
5342 	}
5343 
5344 	return result;
5345 }
5346 
5347 /**
5348  * ice_pci_err_resume - restart operations after PCI error recovery
5349  * @pdev: PCI device information struct
5350  *
5351  * Called to allow the driver to bring things back up after PCI error and/or
5352  * reset recovery have finished
5353  */
5354 static void ice_pci_err_resume(struct pci_dev *pdev)
5355 {
5356 	struct ice_pf *pf = pci_get_drvdata(pdev);
5357 
5358 	if (!pf) {
5359 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5360 			__func__);
5361 		return;
5362 	}
5363 
5364 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5365 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5366 			__func__);
5367 		return;
5368 	}
5369 
5370 	ice_restore_all_vfs_msi_state(pdev);
5371 
5372 	ice_do_reset(pf, ICE_RESET_PFR);
5373 	ice_service_task_restart(pf);
5374 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5375 }
5376 
5377 /**
5378  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5379  * @pdev: PCI device information struct
5380  */
5381 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5382 {
5383 	struct ice_pf *pf = pci_get_drvdata(pdev);
5384 
5385 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5386 		ice_service_task_stop(pf);
5387 
5388 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5389 			set_bit(ICE_PFR_REQ, pf->state);
5390 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5391 		}
5392 	}
5393 }
5394 
5395 /**
5396  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5397  * @pdev: PCI device information struct
5398  */
5399 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5400 {
5401 	ice_pci_err_resume(pdev);
5402 }
5403 
5404 /* ice_pci_tbl - PCI Device ID Table
5405  *
5406  * Wildcard entries (PCI_ANY_ID) should come last
5407  * Last entry must be all 0s
5408  *
5409  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5410  *   Class, Class Mask, private data (not used) }
5411  */
5412 static const struct pci_device_id ice_pci_tbl[] = {
5413 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5414 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5415 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5416 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5417 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5418 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5419 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5420 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5421 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5422 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5423 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5424 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5425 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5426 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5427 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5428 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5429 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5430 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5431 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5432 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5433 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5434 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5435 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5436 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5437 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5438 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5439 	/* required last entry */
5440 	{ 0, }
5441 };
5442 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5443 
5444 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5445 
5446 static const struct pci_error_handlers ice_pci_err_handler = {
5447 	.error_detected = ice_pci_err_detected,
5448 	.slot_reset = ice_pci_err_slot_reset,
5449 	.reset_prepare = ice_pci_err_reset_prepare,
5450 	.reset_done = ice_pci_err_reset_done,
5451 	.resume = ice_pci_err_resume
5452 };
5453 
5454 static struct pci_driver ice_driver = {
5455 	.name = KBUILD_MODNAME,
5456 	.id_table = ice_pci_tbl,
5457 	.probe = ice_probe,
5458 	.remove = ice_remove,
5459 #ifdef CONFIG_PM
5460 	.driver.pm = &ice_pm_ops,
5461 #endif /* CONFIG_PM */
5462 	.shutdown = ice_shutdown,
5463 	.sriov_configure = ice_sriov_configure,
5464 	.err_handler = &ice_pci_err_handler
5465 };
5466 
5467 /**
5468  * ice_module_init - Driver registration routine
5469  *
5470  * ice_module_init is the first routine called when the driver is
5471  * loaded. All it does is register with the PCI subsystem.
5472  */
5473 static int __init ice_module_init(void)
5474 {
5475 	int status;
5476 
5477 	pr_info("%s\n", ice_driver_string);
5478 	pr_info("%s\n", ice_copyright);
5479 
5480 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5481 	if (!ice_wq) {
5482 		pr_err("Failed to create workqueue\n");
5483 		return -ENOMEM;
5484 	}
5485 
5486 	status = pci_register_driver(&ice_driver);
5487 	if (status) {
5488 		pr_err("failed to register PCI driver, err %d\n", status);
5489 		destroy_workqueue(ice_wq);
5490 	}
5491 
5492 	return status;
5493 }
5494 module_init(ice_module_init);
5495 
5496 /**
5497  * ice_module_exit - Driver exit cleanup routine
5498  *
5499  * ice_module_exit is called just before the driver is removed
5500  * from memory.
5501  */
5502 static void __exit ice_module_exit(void)
5503 {
5504 	pci_unregister_driver(&ice_driver);
5505 	destroy_workqueue(ice_wq);
5506 	pr_info("module unloaded\n");
5507 }
5508 module_exit(ice_module_exit);
5509 
5510 /**
5511  * ice_set_mac_address - NDO callback to set MAC address
5512  * @netdev: network interface device structure
5513  * @pi: pointer to an address structure
5514  *
5515  * Returns 0 on success, negative on failure
5516  */
5517 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5518 {
5519 	struct ice_netdev_priv *np = netdev_priv(netdev);
5520 	struct ice_vsi *vsi = np->vsi;
5521 	struct ice_pf *pf = vsi->back;
5522 	struct ice_hw *hw = &pf->hw;
5523 	struct sockaddr *addr = pi;
5524 	u8 old_mac[ETH_ALEN];
5525 	u8 flags = 0;
5526 	u8 *mac;
5527 	int err;
5528 
5529 	mac = (u8 *)addr->sa_data;
5530 
5531 	if (!is_valid_ether_addr(mac))
5532 		return -EADDRNOTAVAIL;
5533 
5534 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5535 		netdev_dbg(netdev, "already using mac %pM\n", mac);
5536 		return 0;
5537 	}
5538 
5539 	if (test_bit(ICE_DOWN, pf->state) ||
5540 	    ice_is_reset_in_progress(pf->state)) {
5541 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5542 			   mac);
5543 		return -EBUSY;
5544 	}
5545 
5546 	if (ice_chnl_dmac_fltr_cnt(pf)) {
5547 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5548 			   mac);
5549 		return -EAGAIN;
5550 	}
5551 
5552 	netif_addr_lock_bh(netdev);
5553 	ether_addr_copy(old_mac, netdev->dev_addr);
5554 	/* change the netdev's MAC address */
5555 	eth_hw_addr_set(netdev, mac);
5556 	netif_addr_unlock_bh(netdev);
5557 
5558 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5559 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5560 	if (err && err != -ENOENT) {
5561 		err = -EADDRNOTAVAIL;
5562 		goto err_update_filters;
5563 	}
5564 
5565 	/* Add filter for new MAC. If filter exists, return success */
5566 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5567 	if (err == -EEXIST) {
5568 		/* Although this MAC filter is already present in hardware it's
5569 		 * possible in some cases (e.g. bonding) that dev_addr was
5570 		 * modified outside of the driver and needs to be restored back
5571 		 * to this value.
5572 		 */
5573 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5574 
5575 		return 0;
5576 	} else if (err) {
5577 		/* error if the new filter addition failed */
5578 		err = -EADDRNOTAVAIL;
5579 	}
5580 
5581 err_update_filters:
5582 	if (err) {
5583 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5584 			   mac);
5585 		netif_addr_lock_bh(netdev);
5586 		eth_hw_addr_set(netdev, old_mac);
5587 		netif_addr_unlock_bh(netdev);
5588 		return err;
5589 	}
5590 
5591 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5592 		   netdev->dev_addr);
5593 
5594 	/* write new MAC address to the firmware */
5595 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5596 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5597 	if (err) {
5598 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5599 			   mac, err);
5600 	}
5601 	return 0;
5602 }
5603 
5604 /**
5605  * ice_set_rx_mode - NDO callback to set the netdev filters
5606  * @netdev: network interface device structure
5607  */
5608 static void ice_set_rx_mode(struct net_device *netdev)
5609 {
5610 	struct ice_netdev_priv *np = netdev_priv(netdev);
5611 	struct ice_vsi *vsi = np->vsi;
5612 
5613 	if (!vsi)
5614 		return;
5615 
5616 	/* Set the flags to synchronize filters
5617 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5618 	 * flags
5619 	 */
5620 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5621 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5622 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5623 
5624 	/* schedule our worker thread which will take care of
5625 	 * applying the new filter changes
5626 	 */
5627 	ice_service_task_schedule(vsi->back);
5628 }
5629 
5630 /**
5631  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5632  * @netdev: network interface device structure
5633  * @queue_index: Queue ID
5634  * @maxrate: maximum bandwidth in Mbps
5635  */
5636 static int
5637 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5638 {
5639 	struct ice_netdev_priv *np = netdev_priv(netdev);
5640 	struct ice_vsi *vsi = np->vsi;
5641 	u16 q_handle;
5642 	int status;
5643 	u8 tc;
5644 
5645 	/* Validate maxrate requested is within permitted range */
5646 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5647 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5648 			   maxrate, queue_index);
5649 		return -EINVAL;
5650 	}
5651 
5652 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5653 	tc = ice_dcb_get_tc(vsi, queue_index);
5654 
5655 	/* Set BW back to default, when user set maxrate to 0 */
5656 	if (!maxrate)
5657 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5658 					       q_handle, ICE_MAX_BW);
5659 	else
5660 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5661 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5662 	if (status)
5663 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5664 			   status);
5665 
5666 	return status;
5667 }
5668 
5669 /**
5670  * ice_fdb_add - add an entry to the hardware database
5671  * @ndm: the input from the stack
5672  * @tb: pointer to array of nladdr (unused)
5673  * @dev: the net device pointer
5674  * @addr: the MAC address entry being added
5675  * @vid: VLAN ID
5676  * @flags: instructions from stack about fdb operation
5677  * @extack: netlink extended ack
5678  */
5679 static int
5680 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5681 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5682 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5683 {
5684 	int err;
5685 
5686 	if (vid) {
5687 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5688 		return -EINVAL;
5689 	}
5690 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5691 		netdev_err(dev, "FDB only supports static addresses\n");
5692 		return -EINVAL;
5693 	}
5694 
5695 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5696 		err = dev_uc_add_excl(dev, addr);
5697 	else if (is_multicast_ether_addr(addr))
5698 		err = dev_mc_add_excl(dev, addr);
5699 	else
5700 		err = -EINVAL;
5701 
5702 	/* Only return duplicate errors if NLM_F_EXCL is set */
5703 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5704 		err = 0;
5705 
5706 	return err;
5707 }
5708 
5709 /**
5710  * ice_fdb_del - delete an entry from the hardware database
5711  * @ndm: the input from the stack
5712  * @tb: pointer to array of nladdr (unused)
5713  * @dev: the net device pointer
5714  * @addr: the MAC address entry being added
5715  * @vid: VLAN ID
5716  * @extack: netlink extended ack
5717  */
5718 static int
5719 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5720 	    struct net_device *dev, const unsigned char *addr,
5721 	    __always_unused u16 vid, struct netlink_ext_ack *extack)
5722 {
5723 	int err;
5724 
5725 	if (ndm->ndm_state & NUD_PERMANENT) {
5726 		netdev_err(dev, "FDB only supports static addresses\n");
5727 		return -EINVAL;
5728 	}
5729 
5730 	if (is_unicast_ether_addr(addr))
5731 		err = dev_uc_del(dev, addr);
5732 	else if (is_multicast_ether_addr(addr))
5733 		err = dev_mc_del(dev, addr);
5734 	else
5735 		err = -EINVAL;
5736 
5737 	return err;
5738 }
5739 
5740 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
5741 					 NETIF_F_HW_VLAN_CTAG_TX | \
5742 					 NETIF_F_HW_VLAN_STAG_RX | \
5743 					 NETIF_F_HW_VLAN_STAG_TX)
5744 
5745 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
5746 					 NETIF_F_HW_VLAN_STAG_FILTER)
5747 
5748 /**
5749  * ice_fix_features - fix the netdev features flags based on device limitations
5750  * @netdev: ptr to the netdev that flags are being fixed on
5751  * @features: features that need to be checked and possibly fixed
5752  *
5753  * Make sure any fixups are made to features in this callback. This enables the
5754  * driver to not have to check unsupported configurations throughout the driver
5755  * because that's the responsiblity of this callback.
5756  *
5757  * Single VLAN Mode (SVM) Supported Features:
5758  *	NETIF_F_HW_VLAN_CTAG_FILTER
5759  *	NETIF_F_HW_VLAN_CTAG_RX
5760  *	NETIF_F_HW_VLAN_CTAG_TX
5761  *
5762  * Double VLAN Mode (DVM) Supported Features:
5763  *	NETIF_F_HW_VLAN_CTAG_FILTER
5764  *	NETIF_F_HW_VLAN_CTAG_RX
5765  *	NETIF_F_HW_VLAN_CTAG_TX
5766  *
5767  *	NETIF_F_HW_VLAN_STAG_FILTER
5768  *	NETIF_HW_VLAN_STAG_RX
5769  *	NETIF_HW_VLAN_STAG_TX
5770  *
5771  * Features that need fixing:
5772  *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5773  *	These are mutually exlusive as the VSI context cannot support multiple
5774  *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
5775  *	is not done, then default to clearing the requested STAG offload
5776  *	settings.
5777  *
5778  *	All supported filtering has to be enabled or disabled together. For
5779  *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5780  *	together. If this is not done, then default to VLAN filtering disabled.
5781  *	These are mutually exclusive as there is currently no way to
5782  *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5783  *	prune rules.
5784  */
5785 static netdev_features_t
5786 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5787 {
5788 	struct ice_netdev_priv *np = netdev_priv(netdev);
5789 	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5790 	bool cur_ctag, cur_stag, req_ctag, req_stag;
5791 
5792 	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5793 	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5794 	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5795 
5796 	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5797 	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5798 	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5799 
5800 	if (req_vlan_fltr != cur_vlan_fltr) {
5801 		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5802 			if (req_ctag && req_stag) {
5803 				features |= NETIF_VLAN_FILTERING_FEATURES;
5804 			} else if (!req_ctag && !req_stag) {
5805 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
5806 			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
5807 				   (!cur_stag && req_stag && !cur_ctag)) {
5808 				features |= NETIF_VLAN_FILTERING_FEATURES;
5809 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5810 			} else if ((cur_ctag && !req_ctag && cur_stag) ||
5811 				   (cur_stag && !req_stag && cur_ctag)) {
5812 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
5813 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
5814 			}
5815 		} else {
5816 			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
5817 				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
5818 
5819 			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
5820 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5821 		}
5822 	}
5823 
5824 	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
5825 	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
5826 		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
5827 		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
5828 			      NETIF_F_HW_VLAN_STAG_TX);
5829 	}
5830 
5831 	return features;
5832 }
5833 
5834 /**
5835  * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
5836  * @vsi: PF's VSI
5837  * @features: features used to determine VLAN offload settings
5838  *
5839  * First, determine the vlan_ethertype based on the VLAN offload bits in
5840  * features. Then determine if stripping and insertion should be enabled or
5841  * disabled. Finally enable or disable VLAN stripping and insertion.
5842  */
5843 static int
5844 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
5845 {
5846 	bool enable_stripping = true, enable_insertion = true;
5847 	struct ice_vsi_vlan_ops *vlan_ops;
5848 	int strip_err = 0, insert_err = 0;
5849 	u16 vlan_ethertype = 0;
5850 
5851 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5852 
5853 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
5854 		vlan_ethertype = ETH_P_8021AD;
5855 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
5856 		vlan_ethertype = ETH_P_8021Q;
5857 
5858 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
5859 		enable_stripping = false;
5860 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
5861 		enable_insertion = false;
5862 
5863 	if (enable_stripping)
5864 		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
5865 	else
5866 		strip_err = vlan_ops->dis_stripping(vsi);
5867 
5868 	if (enable_insertion)
5869 		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
5870 	else
5871 		insert_err = vlan_ops->dis_insertion(vsi);
5872 
5873 	if (strip_err || insert_err)
5874 		return -EIO;
5875 
5876 	return 0;
5877 }
5878 
5879 /**
5880  * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
5881  * @vsi: PF's VSI
5882  * @features: features used to determine VLAN filtering settings
5883  *
5884  * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
5885  * features.
5886  */
5887 static int
5888 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
5889 {
5890 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5891 	int err = 0;
5892 
5893 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
5894 	 * if either bit is set
5895 	 */
5896 	if (features &
5897 	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
5898 		err = vlan_ops->ena_rx_filtering(vsi);
5899 	else
5900 		err = vlan_ops->dis_rx_filtering(vsi);
5901 
5902 	return err;
5903 }
5904 
5905 /**
5906  * ice_set_vlan_features - set VLAN settings based on suggested feature set
5907  * @netdev: ptr to the netdev being adjusted
5908  * @features: the feature set that the stack is suggesting
5909  *
5910  * Only update VLAN settings if the requested_vlan_features are different than
5911  * the current_vlan_features.
5912  */
5913 static int
5914 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
5915 {
5916 	netdev_features_t current_vlan_features, requested_vlan_features;
5917 	struct ice_netdev_priv *np = netdev_priv(netdev);
5918 	struct ice_vsi *vsi = np->vsi;
5919 	int err;
5920 
5921 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
5922 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
5923 	if (current_vlan_features ^ requested_vlan_features) {
5924 		err = ice_set_vlan_offload_features(vsi, features);
5925 		if (err)
5926 			return err;
5927 	}
5928 
5929 	current_vlan_features = netdev->features &
5930 		NETIF_VLAN_FILTERING_FEATURES;
5931 	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
5932 	if (current_vlan_features ^ requested_vlan_features) {
5933 		err = ice_set_vlan_filtering_features(vsi, features);
5934 		if (err)
5935 			return err;
5936 	}
5937 
5938 	return 0;
5939 }
5940 
5941 /**
5942  * ice_set_loopback - turn on/off loopback mode on underlying PF
5943  * @vsi: ptr to VSI
5944  * @ena: flag to indicate the on/off setting
5945  */
5946 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
5947 {
5948 	bool if_running = netif_running(vsi->netdev);
5949 	int ret;
5950 
5951 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
5952 		ret = ice_down(vsi);
5953 		if (ret) {
5954 			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
5955 			return ret;
5956 		}
5957 	}
5958 	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
5959 	if (ret)
5960 		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
5961 	if (if_running)
5962 		ret = ice_up(vsi);
5963 
5964 	return ret;
5965 }
5966 
5967 /**
5968  * ice_set_features - set the netdev feature flags
5969  * @netdev: ptr to the netdev being adjusted
5970  * @features: the feature set that the stack is suggesting
5971  */
5972 static int
5973 ice_set_features(struct net_device *netdev, netdev_features_t features)
5974 {
5975 	netdev_features_t changed = netdev->features ^ features;
5976 	struct ice_netdev_priv *np = netdev_priv(netdev);
5977 	struct ice_vsi *vsi = np->vsi;
5978 	struct ice_pf *pf = vsi->back;
5979 	int ret = 0;
5980 
5981 	/* Don't set any netdev advanced features with device in Safe Mode */
5982 	if (ice_is_safe_mode(pf)) {
5983 		dev_err(ice_pf_to_dev(pf),
5984 			"Device is in Safe Mode - not enabling advanced netdev features\n");
5985 		return ret;
5986 	}
5987 
5988 	/* Do not change setting during reset */
5989 	if (ice_is_reset_in_progress(pf->state)) {
5990 		dev_err(ice_pf_to_dev(pf),
5991 			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5992 		return -EBUSY;
5993 	}
5994 
5995 	/* Multiple features can be changed in one call so keep features in
5996 	 * separate if/else statements to guarantee each feature is checked
5997 	 */
5998 	if (changed & NETIF_F_RXHASH)
5999 		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6000 
6001 	ret = ice_set_vlan_features(netdev, features);
6002 	if (ret)
6003 		return ret;
6004 
6005 	if (changed & NETIF_F_NTUPLE) {
6006 		bool ena = !!(features & NETIF_F_NTUPLE);
6007 
6008 		ice_vsi_manage_fdir(vsi, ena);
6009 		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6010 	}
6011 
6012 	/* don't turn off hw_tc_offload when ADQ is already enabled */
6013 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6014 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6015 		return -EACCES;
6016 	}
6017 
6018 	if (changed & NETIF_F_HW_TC) {
6019 		bool ena = !!(features & NETIF_F_HW_TC);
6020 
6021 		ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6022 		      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6023 	}
6024 
6025 	if (changed & NETIF_F_LOOPBACK)
6026 		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6027 
6028 	return ret;
6029 }
6030 
6031 /**
6032  * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6033  * @vsi: VSI to setup VLAN properties for
6034  */
6035 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6036 {
6037 	int err;
6038 
6039 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6040 	if (err)
6041 		return err;
6042 
6043 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6044 	if (err)
6045 		return err;
6046 
6047 	return ice_vsi_add_vlan_zero(vsi);
6048 }
6049 
6050 /**
6051  * ice_vsi_cfg - Setup the VSI
6052  * @vsi: the VSI being configured
6053  *
6054  * Return 0 on success and negative value on error
6055  */
6056 int ice_vsi_cfg(struct ice_vsi *vsi)
6057 {
6058 	int err;
6059 
6060 	if (vsi->netdev) {
6061 		ice_set_rx_mode(vsi->netdev);
6062 
6063 		if (vsi->type != ICE_VSI_LB) {
6064 			err = ice_vsi_vlan_setup(vsi);
6065 
6066 			if (err)
6067 				return err;
6068 		}
6069 	}
6070 	ice_vsi_cfg_dcb_rings(vsi);
6071 
6072 	err = ice_vsi_cfg_lan_txqs(vsi);
6073 	if (!err && ice_is_xdp_ena_vsi(vsi))
6074 		err = ice_vsi_cfg_xdp_txqs(vsi);
6075 	if (!err)
6076 		err = ice_vsi_cfg_rxqs(vsi);
6077 
6078 	return err;
6079 }
6080 
6081 /* THEORY OF MODERATION:
6082  * The ice driver hardware works differently than the hardware that DIMLIB was
6083  * originally made for. ice hardware doesn't have packet count limits that
6084  * can trigger an interrupt, but it *does* have interrupt rate limit support,
6085  * which is hard-coded to a limit of 250,000 ints/second.
6086  * If not using dynamic moderation, the INTRL value can be modified
6087  * by ethtool rx-usecs-high.
6088  */
6089 struct ice_dim {
6090 	/* the throttle rate for interrupts, basically worst case delay before
6091 	 * an initial interrupt fires, value is stored in microseconds.
6092 	 */
6093 	u16 itr;
6094 };
6095 
6096 /* Make a different profile for Rx that doesn't allow quite so aggressive
6097  * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6098  * second.
6099  */
6100 static const struct ice_dim rx_profile[] = {
6101 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6102 	{8},    /* 125,000 ints/s */
6103 	{16},   /*  62,500 ints/s */
6104 	{62},   /*  16,129 ints/s */
6105 	{126}   /*   7,936 ints/s */
6106 };
6107 
6108 /* The transmit profile, which has the same sorts of values
6109  * as the previous struct
6110  */
6111 static const struct ice_dim tx_profile[] = {
6112 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6113 	{8},    /* 125,000 ints/s */
6114 	{40},   /*  16,125 ints/s */
6115 	{128},  /*   7,812 ints/s */
6116 	{256}   /*   3,906 ints/s */
6117 };
6118 
6119 static void ice_tx_dim_work(struct work_struct *work)
6120 {
6121 	struct ice_ring_container *rc;
6122 	struct dim *dim;
6123 	u16 itr;
6124 
6125 	dim = container_of(work, struct dim, work);
6126 	rc = (struct ice_ring_container *)dim->priv;
6127 
6128 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6129 
6130 	/* look up the values in our local table */
6131 	itr = tx_profile[dim->profile_ix].itr;
6132 
6133 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6134 	ice_write_itr(rc, itr);
6135 
6136 	dim->state = DIM_START_MEASURE;
6137 }
6138 
6139 static void ice_rx_dim_work(struct work_struct *work)
6140 {
6141 	struct ice_ring_container *rc;
6142 	struct dim *dim;
6143 	u16 itr;
6144 
6145 	dim = container_of(work, struct dim, work);
6146 	rc = (struct ice_ring_container *)dim->priv;
6147 
6148 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6149 
6150 	/* look up the values in our local table */
6151 	itr = rx_profile[dim->profile_ix].itr;
6152 
6153 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6154 	ice_write_itr(rc, itr);
6155 
6156 	dim->state = DIM_START_MEASURE;
6157 }
6158 
6159 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6160 
6161 /**
6162  * ice_init_moderation - set up interrupt moderation
6163  * @q_vector: the vector containing rings to be configured
6164  *
6165  * Set up interrupt moderation registers, with the intent to do the right thing
6166  * when called from reset or from probe, and whether or not dynamic moderation
6167  * is enabled or not. Take special care to write all the registers in both
6168  * dynamic moderation mode or not in order to make sure hardware is in a known
6169  * state.
6170  */
6171 static void ice_init_moderation(struct ice_q_vector *q_vector)
6172 {
6173 	struct ice_ring_container *rc;
6174 	bool tx_dynamic, rx_dynamic;
6175 
6176 	rc = &q_vector->tx;
6177 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6178 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6179 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6180 	rc->dim.priv = rc;
6181 	tx_dynamic = ITR_IS_DYNAMIC(rc);
6182 
6183 	/* set the initial TX ITR to match the above */
6184 	ice_write_itr(rc, tx_dynamic ?
6185 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6186 
6187 	rc = &q_vector->rx;
6188 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6189 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6190 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6191 	rc->dim.priv = rc;
6192 	rx_dynamic = ITR_IS_DYNAMIC(rc);
6193 
6194 	/* set the initial RX ITR to match the above */
6195 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6196 				       rc->itr_setting);
6197 
6198 	ice_set_q_vector_intrl(q_vector);
6199 }
6200 
6201 /**
6202  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6203  * @vsi: the VSI being configured
6204  */
6205 static void ice_napi_enable_all(struct ice_vsi *vsi)
6206 {
6207 	int q_idx;
6208 
6209 	if (!vsi->netdev)
6210 		return;
6211 
6212 	ice_for_each_q_vector(vsi, q_idx) {
6213 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6214 
6215 		ice_init_moderation(q_vector);
6216 
6217 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6218 			napi_enable(&q_vector->napi);
6219 	}
6220 }
6221 
6222 /**
6223  * ice_up_complete - Finish the last steps of bringing up a connection
6224  * @vsi: The VSI being configured
6225  *
6226  * Return 0 on success and negative value on error
6227  */
6228 static int ice_up_complete(struct ice_vsi *vsi)
6229 {
6230 	struct ice_pf *pf = vsi->back;
6231 	int err;
6232 
6233 	ice_vsi_cfg_msix(vsi);
6234 
6235 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
6236 	 * Tx queue group list was configured and the context bits were
6237 	 * programmed using ice_vsi_cfg_txqs
6238 	 */
6239 	err = ice_vsi_start_all_rx_rings(vsi);
6240 	if (err)
6241 		return err;
6242 
6243 	clear_bit(ICE_VSI_DOWN, vsi->state);
6244 	ice_napi_enable_all(vsi);
6245 	ice_vsi_ena_irq(vsi);
6246 
6247 	if (vsi->port_info &&
6248 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6249 	    vsi->netdev) {
6250 		ice_print_link_msg(vsi, true);
6251 		netif_tx_start_all_queues(vsi->netdev);
6252 		netif_carrier_on(vsi->netdev);
6253 		if (!ice_is_e810(&pf->hw))
6254 			ice_ptp_link_change(pf, pf->hw.pf_id, true);
6255 	}
6256 
6257 	/* Perform an initial read of the statistics registers now to
6258 	 * set the baseline so counters are ready when interface is up
6259 	 */
6260 	ice_update_eth_stats(vsi);
6261 	ice_service_task_schedule(pf);
6262 
6263 	return 0;
6264 }
6265 
6266 /**
6267  * ice_up - Bring the connection back up after being down
6268  * @vsi: VSI being configured
6269  */
6270 int ice_up(struct ice_vsi *vsi)
6271 {
6272 	int err;
6273 
6274 	err = ice_vsi_cfg(vsi);
6275 	if (!err)
6276 		err = ice_up_complete(vsi);
6277 
6278 	return err;
6279 }
6280 
6281 /**
6282  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6283  * @syncp: pointer to u64_stats_sync
6284  * @stats: stats that pkts and bytes count will be taken from
6285  * @pkts: packets stats counter
6286  * @bytes: bytes stats counter
6287  *
6288  * This function fetches stats from the ring considering the atomic operations
6289  * that needs to be performed to read u64 values in 32 bit machine.
6290  */
6291 void
6292 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6293 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6294 {
6295 	unsigned int start;
6296 
6297 	do {
6298 		start = u64_stats_fetch_begin_irq(syncp);
6299 		*pkts = stats.pkts;
6300 		*bytes = stats.bytes;
6301 	} while (u64_stats_fetch_retry_irq(syncp, start));
6302 }
6303 
6304 /**
6305  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6306  * @vsi: the VSI to be updated
6307  * @vsi_stats: the stats struct to be updated
6308  * @rings: rings to work on
6309  * @count: number of rings
6310  */
6311 static void
6312 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6313 			     struct rtnl_link_stats64 *vsi_stats,
6314 			     struct ice_tx_ring **rings, u16 count)
6315 {
6316 	u16 i;
6317 
6318 	for (i = 0; i < count; i++) {
6319 		struct ice_tx_ring *ring;
6320 		u64 pkts = 0, bytes = 0;
6321 
6322 		ring = READ_ONCE(rings[i]);
6323 		if (!ring)
6324 			continue;
6325 		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
6326 		vsi_stats->tx_packets += pkts;
6327 		vsi_stats->tx_bytes += bytes;
6328 		vsi->tx_restart += ring->tx_stats.restart_q;
6329 		vsi->tx_busy += ring->tx_stats.tx_busy;
6330 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
6331 	}
6332 }
6333 
6334 /**
6335  * ice_update_vsi_ring_stats - Update VSI stats counters
6336  * @vsi: the VSI to be updated
6337  */
6338 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6339 {
6340 	struct rtnl_link_stats64 *vsi_stats;
6341 	u64 pkts, bytes;
6342 	int i;
6343 
6344 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6345 	if (!vsi_stats)
6346 		return;
6347 
6348 	/* reset non-netdev (extended) stats */
6349 	vsi->tx_restart = 0;
6350 	vsi->tx_busy = 0;
6351 	vsi->tx_linearize = 0;
6352 	vsi->rx_buf_failed = 0;
6353 	vsi->rx_page_failed = 0;
6354 
6355 	rcu_read_lock();
6356 
6357 	/* update Tx rings counters */
6358 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6359 				     vsi->num_txq);
6360 
6361 	/* update Rx rings counters */
6362 	ice_for_each_rxq(vsi, i) {
6363 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6364 
6365 		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
6366 		vsi_stats->rx_packets += pkts;
6367 		vsi_stats->rx_bytes += bytes;
6368 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
6369 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
6370 	}
6371 
6372 	/* update XDP Tx rings counters */
6373 	if (ice_is_xdp_ena_vsi(vsi))
6374 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6375 					     vsi->num_xdp_txq);
6376 
6377 	rcu_read_unlock();
6378 
6379 	vsi->net_stats.tx_packets = vsi_stats->tx_packets;
6380 	vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
6381 	vsi->net_stats.rx_packets = vsi_stats->rx_packets;
6382 	vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
6383 
6384 	kfree(vsi_stats);
6385 }
6386 
6387 /**
6388  * ice_update_vsi_stats - Update VSI stats counters
6389  * @vsi: the VSI to be updated
6390  */
6391 void ice_update_vsi_stats(struct ice_vsi *vsi)
6392 {
6393 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6394 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6395 	struct ice_pf *pf = vsi->back;
6396 
6397 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6398 	    test_bit(ICE_CFG_BUSY, pf->state))
6399 		return;
6400 
6401 	/* get stats as recorded by Tx/Rx rings */
6402 	ice_update_vsi_ring_stats(vsi);
6403 
6404 	/* get VSI stats as recorded by the hardware */
6405 	ice_update_eth_stats(vsi);
6406 
6407 	cur_ns->tx_errors = cur_es->tx_errors;
6408 	cur_ns->rx_dropped = cur_es->rx_discards;
6409 	cur_ns->tx_dropped = cur_es->tx_discards;
6410 	cur_ns->multicast = cur_es->rx_multicast;
6411 
6412 	/* update some more netdev stats if this is main VSI */
6413 	if (vsi->type == ICE_VSI_PF) {
6414 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6415 		cur_ns->rx_errors = pf->stats.crc_errors +
6416 				    pf->stats.illegal_bytes +
6417 				    pf->stats.rx_len_errors +
6418 				    pf->stats.rx_undersize +
6419 				    pf->hw_csum_rx_error +
6420 				    pf->stats.rx_jabber +
6421 				    pf->stats.rx_fragments +
6422 				    pf->stats.rx_oversize;
6423 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6424 		/* record drops from the port level */
6425 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6426 	}
6427 }
6428 
6429 /**
6430  * ice_update_pf_stats - Update PF port stats counters
6431  * @pf: PF whose stats needs to be updated
6432  */
6433 void ice_update_pf_stats(struct ice_pf *pf)
6434 {
6435 	struct ice_hw_port_stats *prev_ps, *cur_ps;
6436 	struct ice_hw *hw = &pf->hw;
6437 	u16 fd_ctr_base;
6438 	u8 port;
6439 
6440 	port = hw->port_info->lport;
6441 	prev_ps = &pf->stats_prev;
6442 	cur_ps = &pf->stats;
6443 
6444 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6445 			  &prev_ps->eth.rx_bytes,
6446 			  &cur_ps->eth.rx_bytes);
6447 
6448 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6449 			  &prev_ps->eth.rx_unicast,
6450 			  &cur_ps->eth.rx_unicast);
6451 
6452 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6453 			  &prev_ps->eth.rx_multicast,
6454 			  &cur_ps->eth.rx_multicast);
6455 
6456 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6457 			  &prev_ps->eth.rx_broadcast,
6458 			  &cur_ps->eth.rx_broadcast);
6459 
6460 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6461 			  &prev_ps->eth.rx_discards,
6462 			  &cur_ps->eth.rx_discards);
6463 
6464 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6465 			  &prev_ps->eth.tx_bytes,
6466 			  &cur_ps->eth.tx_bytes);
6467 
6468 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6469 			  &prev_ps->eth.tx_unicast,
6470 			  &cur_ps->eth.tx_unicast);
6471 
6472 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6473 			  &prev_ps->eth.tx_multicast,
6474 			  &cur_ps->eth.tx_multicast);
6475 
6476 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6477 			  &prev_ps->eth.tx_broadcast,
6478 			  &cur_ps->eth.tx_broadcast);
6479 
6480 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6481 			  &prev_ps->tx_dropped_link_down,
6482 			  &cur_ps->tx_dropped_link_down);
6483 
6484 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6485 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6486 
6487 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6488 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6489 
6490 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6491 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6492 
6493 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6494 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6495 
6496 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6497 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6498 
6499 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6500 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6501 
6502 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6503 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6504 
6505 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6506 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6507 
6508 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6509 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6510 
6511 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6512 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6513 
6514 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6515 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6516 
6517 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6518 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6519 
6520 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6521 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6522 
6523 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6524 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6525 
6526 	fd_ctr_base = hw->fd_ctr_base;
6527 
6528 	ice_stat_update40(hw,
6529 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6530 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6531 			  &cur_ps->fd_sb_match);
6532 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6533 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6534 
6535 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6536 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6537 
6538 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6539 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6540 
6541 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6542 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6543 
6544 	ice_update_dcb_stats(pf);
6545 
6546 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6547 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
6548 
6549 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6550 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6551 
6552 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6553 			  &prev_ps->mac_local_faults,
6554 			  &cur_ps->mac_local_faults);
6555 
6556 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6557 			  &prev_ps->mac_remote_faults,
6558 			  &cur_ps->mac_remote_faults);
6559 
6560 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6561 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6562 
6563 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6564 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6565 
6566 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6567 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6568 
6569 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6570 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6571 
6572 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6573 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6574 
6575 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6576 
6577 	pf->stat_prev_loaded = true;
6578 }
6579 
6580 /**
6581  * ice_get_stats64 - get statistics for network device structure
6582  * @netdev: network interface device structure
6583  * @stats: main device statistics structure
6584  */
6585 static
6586 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6587 {
6588 	struct ice_netdev_priv *np = netdev_priv(netdev);
6589 	struct rtnl_link_stats64 *vsi_stats;
6590 	struct ice_vsi *vsi = np->vsi;
6591 
6592 	vsi_stats = &vsi->net_stats;
6593 
6594 	if (!vsi->num_txq || !vsi->num_rxq)
6595 		return;
6596 
6597 	/* netdev packet/byte stats come from ring counter. These are obtained
6598 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6599 	 * But, only call the update routine and read the registers if VSI is
6600 	 * not down.
6601 	 */
6602 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6603 		ice_update_vsi_ring_stats(vsi);
6604 	stats->tx_packets = vsi_stats->tx_packets;
6605 	stats->tx_bytes = vsi_stats->tx_bytes;
6606 	stats->rx_packets = vsi_stats->rx_packets;
6607 	stats->rx_bytes = vsi_stats->rx_bytes;
6608 
6609 	/* The rest of the stats can be read from the hardware but instead we
6610 	 * just return values that the watchdog task has already obtained from
6611 	 * the hardware.
6612 	 */
6613 	stats->multicast = vsi_stats->multicast;
6614 	stats->tx_errors = vsi_stats->tx_errors;
6615 	stats->tx_dropped = vsi_stats->tx_dropped;
6616 	stats->rx_errors = vsi_stats->rx_errors;
6617 	stats->rx_dropped = vsi_stats->rx_dropped;
6618 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6619 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6620 }
6621 
6622 /**
6623  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6624  * @vsi: VSI having NAPI disabled
6625  */
6626 static void ice_napi_disable_all(struct ice_vsi *vsi)
6627 {
6628 	int q_idx;
6629 
6630 	if (!vsi->netdev)
6631 		return;
6632 
6633 	ice_for_each_q_vector(vsi, q_idx) {
6634 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6635 
6636 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6637 			napi_disable(&q_vector->napi);
6638 
6639 		cancel_work_sync(&q_vector->tx.dim.work);
6640 		cancel_work_sync(&q_vector->rx.dim.work);
6641 	}
6642 }
6643 
6644 /**
6645  * ice_down - Shutdown the connection
6646  * @vsi: The VSI being stopped
6647  *
6648  * Caller of this function is expected to set the vsi->state ICE_DOWN bit
6649  */
6650 int ice_down(struct ice_vsi *vsi)
6651 {
6652 	int i, tx_err, rx_err, vlan_err = 0;
6653 
6654 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6655 
6656 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6657 		vlan_err = ice_vsi_del_vlan_zero(vsi);
6658 		if (!ice_is_e810(&vsi->back->hw))
6659 			ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6660 		netif_carrier_off(vsi->netdev);
6661 		netif_tx_disable(vsi->netdev);
6662 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6663 		ice_eswitch_stop_all_tx_queues(vsi->back);
6664 	}
6665 
6666 	ice_vsi_dis_irq(vsi);
6667 
6668 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6669 	if (tx_err)
6670 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6671 			   vsi->vsi_num, tx_err);
6672 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6673 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6674 		if (tx_err)
6675 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6676 				   vsi->vsi_num, tx_err);
6677 	}
6678 
6679 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6680 	if (rx_err)
6681 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6682 			   vsi->vsi_num, rx_err);
6683 
6684 	ice_napi_disable_all(vsi);
6685 
6686 	ice_for_each_txq(vsi, i)
6687 		ice_clean_tx_ring(vsi->tx_rings[i]);
6688 
6689 	ice_for_each_rxq(vsi, i)
6690 		ice_clean_rx_ring(vsi->rx_rings[i]);
6691 
6692 	if (tx_err || rx_err || vlan_err) {
6693 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6694 			   vsi->vsi_num, vsi->vsw->sw_id);
6695 		return -EIO;
6696 	}
6697 
6698 	return 0;
6699 }
6700 
6701 /**
6702  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6703  * @vsi: VSI having resources allocated
6704  *
6705  * Return 0 on success, negative on failure
6706  */
6707 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6708 {
6709 	int i, err = 0;
6710 
6711 	if (!vsi->num_txq) {
6712 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6713 			vsi->vsi_num);
6714 		return -EINVAL;
6715 	}
6716 
6717 	ice_for_each_txq(vsi, i) {
6718 		struct ice_tx_ring *ring = vsi->tx_rings[i];
6719 
6720 		if (!ring)
6721 			return -EINVAL;
6722 
6723 		if (vsi->netdev)
6724 			ring->netdev = vsi->netdev;
6725 		err = ice_setup_tx_ring(ring);
6726 		if (err)
6727 			break;
6728 	}
6729 
6730 	return err;
6731 }
6732 
6733 /**
6734  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6735  * @vsi: VSI having resources allocated
6736  *
6737  * Return 0 on success, negative on failure
6738  */
6739 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6740 {
6741 	int i, err = 0;
6742 
6743 	if (!vsi->num_rxq) {
6744 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6745 			vsi->vsi_num);
6746 		return -EINVAL;
6747 	}
6748 
6749 	ice_for_each_rxq(vsi, i) {
6750 		struct ice_rx_ring *ring = vsi->rx_rings[i];
6751 
6752 		if (!ring)
6753 			return -EINVAL;
6754 
6755 		if (vsi->netdev)
6756 			ring->netdev = vsi->netdev;
6757 		err = ice_setup_rx_ring(ring);
6758 		if (err)
6759 			break;
6760 	}
6761 
6762 	return err;
6763 }
6764 
6765 /**
6766  * ice_vsi_open_ctrl - open control VSI for use
6767  * @vsi: the VSI to open
6768  *
6769  * Initialization of the Control VSI
6770  *
6771  * Returns 0 on success, negative value on error
6772  */
6773 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6774 {
6775 	char int_name[ICE_INT_NAME_STR_LEN];
6776 	struct ice_pf *pf = vsi->back;
6777 	struct device *dev;
6778 	int err;
6779 
6780 	dev = ice_pf_to_dev(pf);
6781 	/* allocate descriptors */
6782 	err = ice_vsi_setup_tx_rings(vsi);
6783 	if (err)
6784 		goto err_setup_tx;
6785 
6786 	err = ice_vsi_setup_rx_rings(vsi);
6787 	if (err)
6788 		goto err_setup_rx;
6789 
6790 	err = ice_vsi_cfg(vsi);
6791 	if (err)
6792 		goto err_setup_rx;
6793 
6794 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6795 		 dev_driver_string(dev), dev_name(dev));
6796 	err = ice_vsi_req_irq_msix(vsi, int_name);
6797 	if (err)
6798 		goto err_setup_rx;
6799 
6800 	ice_vsi_cfg_msix(vsi);
6801 
6802 	err = ice_vsi_start_all_rx_rings(vsi);
6803 	if (err)
6804 		goto err_up_complete;
6805 
6806 	clear_bit(ICE_VSI_DOWN, vsi->state);
6807 	ice_vsi_ena_irq(vsi);
6808 
6809 	return 0;
6810 
6811 err_up_complete:
6812 	ice_down(vsi);
6813 err_setup_rx:
6814 	ice_vsi_free_rx_rings(vsi);
6815 err_setup_tx:
6816 	ice_vsi_free_tx_rings(vsi);
6817 
6818 	return err;
6819 }
6820 
6821 /**
6822  * ice_vsi_open - Called when a network interface is made active
6823  * @vsi: the VSI to open
6824  *
6825  * Initialization of the VSI
6826  *
6827  * Returns 0 on success, negative value on error
6828  */
6829 int ice_vsi_open(struct ice_vsi *vsi)
6830 {
6831 	char int_name[ICE_INT_NAME_STR_LEN];
6832 	struct ice_pf *pf = vsi->back;
6833 	int err;
6834 
6835 	/* allocate descriptors */
6836 	err = ice_vsi_setup_tx_rings(vsi);
6837 	if (err)
6838 		goto err_setup_tx;
6839 
6840 	err = ice_vsi_setup_rx_rings(vsi);
6841 	if (err)
6842 		goto err_setup_rx;
6843 
6844 	err = ice_vsi_cfg(vsi);
6845 	if (err)
6846 		goto err_setup_rx;
6847 
6848 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6849 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6850 	err = ice_vsi_req_irq_msix(vsi, int_name);
6851 	if (err)
6852 		goto err_setup_rx;
6853 
6854 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
6855 
6856 	if (vsi->type == ICE_VSI_PF) {
6857 		/* Notify the stack of the actual queue counts. */
6858 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6859 		if (err)
6860 			goto err_set_qs;
6861 
6862 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6863 		if (err)
6864 			goto err_set_qs;
6865 	}
6866 
6867 	err = ice_up_complete(vsi);
6868 	if (err)
6869 		goto err_up_complete;
6870 
6871 	return 0;
6872 
6873 err_up_complete:
6874 	ice_down(vsi);
6875 err_set_qs:
6876 	ice_vsi_free_irq(vsi);
6877 err_setup_rx:
6878 	ice_vsi_free_rx_rings(vsi);
6879 err_setup_tx:
6880 	ice_vsi_free_tx_rings(vsi);
6881 
6882 	return err;
6883 }
6884 
6885 /**
6886  * ice_vsi_release_all - Delete all VSIs
6887  * @pf: PF from which all VSIs are being removed
6888  */
6889 static void ice_vsi_release_all(struct ice_pf *pf)
6890 {
6891 	int err, i;
6892 
6893 	if (!pf->vsi)
6894 		return;
6895 
6896 	ice_for_each_vsi(pf, i) {
6897 		if (!pf->vsi[i])
6898 			continue;
6899 
6900 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
6901 			continue;
6902 
6903 		err = ice_vsi_release(pf->vsi[i]);
6904 		if (err)
6905 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6906 				i, err, pf->vsi[i]->vsi_num);
6907 	}
6908 }
6909 
6910 /**
6911  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6912  * @pf: pointer to the PF instance
6913  * @type: VSI type to rebuild
6914  *
6915  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6916  */
6917 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6918 {
6919 	struct device *dev = ice_pf_to_dev(pf);
6920 	int i, err;
6921 
6922 	ice_for_each_vsi(pf, i) {
6923 		struct ice_vsi *vsi = pf->vsi[i];
6924 
6925 		if (!vsi || vsi->type != type)
6926 			continue;
6927 
6928 		/* rebuild the VSI */
6929 		err = ice_vsi_rebuild(vsi, true);
6930 		if (err) {
6931 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6932 				err, vsi->idx, ice_vsi_type_str(type));
6933 			return err;
6934 		}
6935 
6936 		/* replay filters for the VSI */
6937 		err = ice_replay_vsi(&pf->hw, vsi->idx);
6938 		if (err) {
6939 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
6940 				err, vsi->idx, ice_vsi_type_str(type));
6941 			return err;
6942 		}
6943 
6944 		/* Re-map HW VSI number, using VSI handle that has been
6945 		 * previously validated in ice_replay_vsi() call above
6946 		 */
6947 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6948 
6949 		/* enable the VSI */
6950 		err = ice_ena_vsi(vsi, false);
6951 		if (err) {
6952 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6953 				err, vsi->idx, ice_vsi_type_str(type));
6954 			return err;
6955 		}
6956 
6957 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6958 			 ice_vsi_type_str(type));
6959 	}
6960 
6961 	return 0;
6962 }
6963 
6964 /**
6965  * ice_update_pf_netdev_link - Update PF netdev link status
6966  * @pf: pointer to the PF instance
6967  */
6968 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6969 {
6970 	bool link_up;
6971 	int i;
6972 
6973 	ice_for_each_vsi(pf, i) {
6974 		struct ice_vsi *vsi = pf->vsi[i];
6975 
6976 		if (!vsi || vsi->type != ICE_VSI_PF)
6977 			return;
6978 
6979 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6980 		if (link_up) {
6981 			netif_carrier_on(pf->vsi[i]->netdev);
6982 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6983 		} else {
6984 			netif_carrier_off(pf->vsi[i]->netdev);
6985 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6986 		}
6987 	}
6988 }
6989 
6990 /**
6991  * ice_rebuild - rebuild after reset
6992  * @pf: PF to rebuild
6993  * @reset_type: type of reset
6994  *
6995  * Do not rebuild VF VSI in this flow because that is already handled via
6996  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6997  * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
6998  * to reset/rebuild all the VF VSI twice.
6999  */
7000 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7001 {
7002 	struct device *dev = ice_pf_to_dev(pf);
7003 	struct ice_hw *hw = &pf->hw;
7004 	bool dvm;
7005 	int err;
7006 
7007 	if (test_bit(ICE_DOWN, pf->state))
7008 		goto clear_recovery;
7009 
7010 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7011 
7012 #define ICE_EMP_RESET_SLEEP_MS 5000
7013 	if (reset_type == ICE_RESET_EMPR) {
7014 		/* If an EMP reset has occurred, any previously pending flash
7015 		 * update will have completed. We no longer know whether or
7016 		 * not the NVM update EMP reset is restricted.
7017 		 */
7018 		pf->fw_emp_reset_disabled = false;
7019 
7020 		msleep(ICE_EMP_RESET_SLEEP_MS);
7021 	}
7022 
7023 	err = ice_init_all_ctrlq(hw);
7024 	if (err) {
7025 		dev_err(dev, "control queues init failed %d\n", err);
7026 		goto err_init_ctrlq;
7027 	}
7028 
7029 	/* if DDP was previously loaded successfully */
7030 	if (!ice_is_safe_mode(pf)) {
7031 		/* reload the SW DB of filter tables */
7032 		if (reset_type == ICE_RESET_PFR)
7033 			ice_fill_blk_tbls(hw);
7034 		else
7035 			/* Reload DDP Package after CORER/GLOBR reset */
7036 			ice_load_pkg(NULL, pf);
7037 	}
7038 
7039 	err = ice_clear_pf_cfg(hw);
7040 	if (err) {
7041 		dev_err(dev, "clear PF configuration failed %d\n", err);
7042 		goto err_init_ctrlq;
7043 	}
7044 
7045 	ice_clear_pxe_mode(hw);
7046 
7047 	err = ice_init_nvm(hw);
7048 	if (err) {
7049 		dev_err(dev, "ice_init_nvm failed %d\n", err);
7050 		goto err_init_ctrlq;
7051 	}
7052 
7053 	err = ice_get_caps(hw);
7054 	if (err) {
7055 		dev_err(dev, "ice_get_caps failed %d\n", err);
7056 		goto err_init_ctrlq;
7057 	}
7058 
7059 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7060 	if (err) {
7061 		dev_err(dev, "set_mac_cfg failed %d\n", err);
7062 		goto err_init_ctrlq;
7063 	}
7064 
7065 	dvm = ice_is_dvm_ena(hw);
7066 
7067 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7068 	if (err)
7069 		goto err_init_ctrlq;
7070 
7071 	err = ice_sched_init_port(hw->port_info);
7072 	if (err)
7073 		goto err_sched_init_port;
7074 
7075 	/* start misc vector */
7076 	err = ice_req_irq_msix_misc(pf);
7077 	if (err) {
7078 		dev_err(dev, "misc vector setup failed: %d\n", err);
7079 		goto err_sched_init_port;
7080 	}
7081 
7082 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7083 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7084 		if (!rd32(hw, PFQF_FD_SIZE)) {
7085 			u16 unused, guar, b_effort;
7086 
7087 			guar = hw->func_caps.fd_fltr_guar;
7088 			b_effort = hw->func_caps.fd_fltr_best_effort;
7089 
7090 			/* force guaranteed filter pool for PF */
7091 			ice_alloc_fd_guar_item(hw, &unused, guar);
7092 			/* force shared filter pool for PF */
7093 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7094 		}
7095 	}
7096 
7097 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7098 		ice_dcb_rebuild(pf);
7099 
7100 	/* If the PF previously had enabled PTP, PTP init needs to happen before
7101 	 * the VSI rebuild. If not, this causes the PTP link status events to
7102 	 * fail.
7103 	 */
7104 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7105 		ice_ptp_reset(pf);
7106 
7107 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7108 		ice_gnss_init(pf);
7109 
7110 	/* rebuild PF VSI */
7111 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7112 	if (err) {
7113 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7114 		goto err_vsi_rebuild;
7115 	}
7116 
7117 	/* configure PTP timestamping after VSI rebuild */
7118 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7119 		ice_ptp_cfg_timestamp(pf, false);
7120 
7121 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7122 	if (err) {
7123 		dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7124 		goto err_vsi_rebuild;
7125 	}
7126 
7127 	if (reset_type == ICE_RESET_PFR) {
7128 		err = ice_rebuild_channels(pf);
7129 		if (err) {
7130 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7131 				err);
7132 			goto err_vsi_rebuild;
7133 		}
7134 	}
7135 
7136 	/* If Flow Director is active */
7137 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7138 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7139 		if (err) {
7140 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7141 			goto err_vsi_rebuild;
7142 		}
7143 
7144 		/* replay HW Flow Director recipes */
7145 		if (hw->fdir_prof)
7146 			ice_fdir_replay_flows(hw);
7147 
7148 		/* replay Flow Director filters */
7149 		ice_fdir_replay_fltrs(pf);
7150 
7151 		ice_rebuild_arfs(pf);
7152 	}
7153 
7154 	ice_update_pf_netdev_link(pf);
7155 
7156 	/* tell the firmware we are up */
7157 	err = ice_send_version(pf);
7158 	if (err) {
7159 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7160 			err);
7161 		goto err_vsi_rebuild;
7162 	}
7163 
7164 	ice_replay_post(hw);
7165 
7166 	/* if we get here, reset flow is successful */
7167 	clear_bit(ICE_RESET_FAILED, pf->state);
7168 
7169 	ice_plug_aux_dev(pf);
7170 	return;
7171 
7172 err_vsi_rebuild:
7173 err_sched_init_port:
7174 	ice_sched_cleanup_all(hw);
7175 err_init_ctrlq:
7176 	ice_shutdown_all_ctrlq(hw);
7177 	set_bit(ICE_RESET_FAILED, pf->state);
7178 clear_recovery:
7179 	/* set this bit in PF state to control service task scheduling */
7180 	set_bit(ICE_NEEDS_RESTART, pf->state);
7181 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7182 }
7183 
7184 /**
7185  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
7186  * @vsi: Pointer to VSI structure
7187  */
7188 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
7189 {
7190 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
7191 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
7192 	else
7193 		return ICE_RXBUF_3072;
7194 }
7195 
7196 /**
7197  * ice_change_mtu - NDO callback to change the MTU
7198  * @netdev: network interface device structure
7199  * @new_mtu: new value for maximum frame size
7200  *
7201  * Returns 0 on success, negative on failure
7202  */
7203 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7204 {
7205 	struct ice_netdev_priv *np = netdev_priv(netdev);
7206 	struct ice_vsi *vsi = np->vsi;
7207 	struct ice_pf *pf = vsi->back;
7208 	u8 count = 0;
7209 	int err = 0;
7210 
7211 	if (new_mtu == (int)netdev->mtu) {
7212 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7213 		return 0;
7214 	}
7215 
7216 	if (ice_is_xdp_ena_vsi(vsi)) {
7217 		int frame_size = ice_max_xdp_frame_size(vsi);
7218 
7219 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7220 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7221 				   frame_size - ICE_ETH_PKT_HDR_PAD);
7222 			return -EINVAL;
7223 		}
7224 	}
7225 
7226 	/* if a reset is in progress, wait for some time for it to complete */
7227 	do {
7228 		if (ice_is_reset_in_progress(pf->state)) {
7229 			count++;
7230 			usleep_range(1000, 2000);
7231 		} else {
7232 			break;
7233 		}
7234 
7235 	} while (count < 100);
7236 
7237 	if (count == 100) {
7238 		netdev_err(netdev, "can't change MTU. Device is busy\n");
7239 		return -EBUSY;
7240 	}
7241 
7242 	netdev->mtu = (unsigned int)new_mtu;
7243 
7244 	/* if VSI is up, bring it down and then back up */
7245 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
7246 		err = ice_down(vsi);
7247 		if (err) {
7248 			netdev_err(netdev, "change MTU if_down err %d\n", err);
7249 			return err;
7250 		}
7251 
7252 		err = ice_up(vsi);
7253 		if (err) {
7254 			netdev_err(netdev, "change MTU if_up err %d\n", err);
7255 			return err;
7256 		}
7257 	}
7258 
7259 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7260 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7261 
7262 	return err;
7263 }
7264 
7265 /**
7266  * ice_eth_ioctl - Access the hwtstamp interface
7267  * @netdev: network interface device structure
7268  * @ifr: interface request data
7269  * @cmd: ioctl command
7270  */
7271 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7272 {
7273 	struct ice_netdev_priv *np = netdev_priv(netdev);
7274 	struct ice_pf *pf = np->vsi->back;
7275 
7276 	switch (cmd) {
7277 	case SIOCGHWTSTAMP:
7278 		return ice_ptp_get_ts_config(pf, ifr);
7279 	case SIOCSHWTSTAMP:
7280 		return ice_ptp_set_ts_config(pf, ifr);
7281 	default:
7282 		return -EOPNOTSUPP;
7283 	}
7284 }
7285 
7286 /**
7287  * ice_aq_str - convert AQ err code to a string
7288  * @aq_err: the AQ error code to convert
7289  */
7290 const char *ice_aq_str(enum ice_aq_err aq_err)
7291 {
7292 	switch (aq_err) {
7293 	case ICE_AQ_RC_OK:
7294 		return "OK";
7295 	case ICE_AQ_RC_EPERM:
7296 		return "ICE_AQ_RC_EPERM";
7297 	case ICE_AQ_RC_ENOENT:
7298 		return "ICE_AQ_RC_ENOENT";
7299 	case ICE_AQ_RC_ENOMEM:
7300 		return "ICE_AQ_RC_ENOMEM";
7301 	case ICE_AQ_RC_EBUSY:
7302 		return "ICE_AQ_RC_EBUSY";
7303 	case ICE_AQ_RC_EEXIST:
7304 		return "ICE_AQ_RC_EEXIST";
7305 	case ICE_AQ_RC_EINVAL:
7306 		return "ICE_AQ_RC_EINVAL";
7307 	case ICE_AQ_RC_ENOSPC:
7308 		return "ICE_AQ_RC_ENOSPC";
7309 	case ICE_AQ_RC_ENOSYS:
7310 		return "ICE_AQ_RC_ENOSYS";
7311 	case ICE_AQ_RC_EMODE:
7312 		return "ICE_AQ_RC_EMODE";
7313 	case ICE_AQ_RC_ENOSEC:
7314 		return "ICE_AQ_RC_ENOSEC";
7315 	case ICE_AQ_RC_EBADSIG:
7316 		return "ICE_AQ_RC_EBADSIG";
7317 	case ICE_AQ_RC_ESVN:
7318 		return "ICE_AQ_RC_ESVN";
7319 	case ICE_AQ_RC_EBADMAN:
7320 		return "ICE_AQ_RC_EBADMAN";
7321 	case ICE_AQ_RC_EBADBUF:
7322 		return "ICE_AQ_RC_EBADBUF";
7323 	}
7324 
7325 	return "ICE_AQ_RC_UNKNOWN";
7326 }
7327 
7328 /**
7329  * ice_set_rss_lut - Set RSS LUT
7330  * @vsi: Pointer to VSI structure
7331  * @lut: Lookup table
7332  * @lut_size: Lookup table size
7333  *
7334  * Returns 0 on success, negative on failure
7335  */
7336 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7337 {
7338 	struct ice_aq_get_set_rss_lut_params params = {};
7339 	struct ice_hw *hw = &vsi->back->hw;
7340 	int status;
7341 
7342 	if (!lut)
7343 		return -EINVAL;
7344 
7345 	params.vsi_handle = vsi->idx;
7346 	params.lut_size = lut_size;
7347 	params.lut_type = vsi->rss_lut_type;
7348 	params.lut = lut;
7349 
7350 	status = ice_aq_set_rss_lut(hw, &params);
7351 	if (status)
7352 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7353 			status, ice_aq_str(hw->adminq.sq_last_status));
7354 
7355 	return status;
7356 }
7357 
7358 /**
7359  * ice_set_rss_key - Set RSS key
7360  * @vsi: Pointer to the VSI structure
7361  * @seed: RSS hash seed
7362  *
7363  * Returns 0 on success, negative on failure
7364  */
7365 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7366 {
7367 	struct ice_hw *hw = &vsi->back->hw;
7368 	int status;
7369 
7370 	if (!seed)
7371 		return -EINVAL;
7372 
7373 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7374 	if (status)
7375 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7376 			status, ice_aq_str(hw->adminq.sq_last_status));
7377 
7378 	return status;
7379 }
7380 
7381 /**
7382  * ice_get_rss_lut - Get RSS LUT
7383  * @vsi: Pointer to VSI structure
7384  * @lut: Buffer to store the lookup table entries
7385  * @lut_size: Size of buffer to store the lookup table entries
7386  *
7387  * Returns 0 on success, negative on failure
7388  */
7389 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7390 {
7391 	struct ice_aq_get_set_rss_lut_params params = {};
7392 	struct ice_hw *hw = &vsi->back->hw;
7393 	int status;
7394 
7395 	if (!lut)
7396 		return -EINVAL;
7397 
7398 	params.vsi_handle = vsi->idx;
7399 	params.lut_size = lut_size;
7400 	params.lut_type = vsi->rss_lut_type;
7401 	params.lut = lut;
7402 
7403 	status = ice_aq_get_rss_lut(hw, &params);
7404 	if (status)
7405 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7406 			status, ice_aq_str(hw->adminq.sq_last_status));
7407 
7408 	return status;
7409 }
7410 
7411 /**
7412  * ice_get_rss_key - Get RSS key
7413  * @vsi: Pointer to VSI structure
7414  * @seed: Buffer to store the key in
7415  *
7416  * Returns 0 on success, negative on failure
7417  */
7418 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7419 {
7420 	struct ice_hw *hw = &vsi->back->hw;
7421 	int status;
7422 
7423 	if (!seed)
7424 		return -EINVAL;
7425 
7426 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7427 	if (status)
7428 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7429 			status, ice_aq_str(hw->adminq.sq_last_status));
7430 
7431 	return status;
7432 }
7433 
7434 /**
7435  * ice_bridge_getlink - Get the hardware bridge mode
7436  * @skb: skb buff
7437  * @pid: process ID
7438  * @seq: RTNL message seq
7439  * @dev: the netdev being configured
7440  * @filter_mask: filter mask passed in
7441  * @nlflags: netlink flags passed in
7442  *
7443  * Return the bridge mode (VEB/VEPA)
7444  */
7445 static int
7446 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7447 		   struct net_device *dev, u32 filter_mask, int nlflags)
7448 {
7449 	struct ice_netdev_priv *np = netdev_priv(dev);
7450 	struct ice_vsi *vsi = np->vsi;
7451 	struct ice_pf *pf = vsi->back;
7452 	u16 bmode;
7453 
7454 	bmode = pf->first_sw->bridge_mode;
7455 
7456 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7457 				       filter_mask, NULL);
7458 }
7459 
7460 /**
7461  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7462  * @vsi: Pointer to VSI structure
7463  * @bmode: Hardware bridge mode (VEB/VEPA)
7464  *
7465  * Returns 0 on success, negative on failure
7466  */
7467 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7468 {
7469 	struct ice_aqc_vsi_props *vsi_props;
7470 	struct ice_hw *hw = &vsi->back->hw;
7471 	struct ice_vsi_ctx *ctxt;
7472 	int ret;
7473 
7474 	vsi_props = &vsi->info;
7475 
7476 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7477 	if (!ctxt)
7478 		return -ENOMEM;
7479 
7480 	ctxt->info = vsi->info;
7481 
7482 	if (bmode == BRIDGE_MODE_VEB)
7483 		/* change from VEPA to VEB mode */
7484 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7485 	else
7486 		/* change from VEB to VEPA mode */
7487 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7488 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7489 
7490 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7491 	if (ret) {
7492 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7493 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7494 		goto out;
7495 	}
7496 	/* Update sw flags for book keeping */
7497 	vsi_props->sw_flags = ctxt->info.sw_flags;
7498 
7499 out:
7500 	kfree(ctxt);
7501 	return ret;
7502 }
7503 
7504 /**
7505  * ice_bridge_setlink - Set the hardware bridge mode
7506  * @dev: the netdev being configured
7507  * @nlh: RTNL message
7508  * @flags: bridge setlink flags
7509  * @extack: netlink extended ack
7510  *
7511  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7512  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
7513  * not already set for all VSIs connected to this switch. And also update the
7514  * unicast switch filter rules for the corresponding switch of the netdev.
7515  */
7516 static int
7517 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7518 		   u16 __always_unused flags,
7519 		   struct netlink_ext_ack __always_unused *extack)
7520 {
7521 	struct ice_netdev_priv *np = netdev_priv(dev);
7522 	struct ice_pf *pf = np->vsi->back;
7523 	struct nlattr *attr, *br_spec;
7524 	struct ice_hw *hw = &pf->hw;
7525 	struct ice_sw *pf_sw;
7526 	int rem, v, err = 0;
7527 
7528 	pf_sw = pf->first_sw;
7529 	/* find the attribute in the netlink message */
7530 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7531 
7532 	nla_for_each_nested(attr, br_spec, rem) {
7533 		__u16 mode;
7534 
7535 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7536 			continue;
7537 		mode = nla_get_u16(attr);
7538 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7539 			return -EINVAL;
7540 		/* Continue  if bridge mode is not being flipped */
7541 		if (mode == pf_sw->bridge_mode)
7542 			continue;
7543 		/* Iterates through the PF VSI list and update the loopback
7544 		 * mode of the VSI
7545 		 */
7546 		ice_for_each_vsi(pf, v) {
7547 			if (!pf->vsi[v])
7548 				continue;
7549 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7550 			if (err)
7551 				return err;
7552 		}
7553 
7554 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7555 		/* Update the unicast switch filter rules for the corresponding
7556 		 * switch of the netdev
7557 		 */
7558 		err = ice_update_sw_rule_bridge_mode(hw);
7559 		if (err) {
7560 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7561 				   mode, err,
7562 				   ice_aq_str(hw->adminq.sq_last_status));
7563 			/* revert hw->evb_veb */
7564 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7565 			return err;
7566 		}
7567 
7568 		pf_sw->bridge_mode = mode;
7569 	}
7570 
7571 	return 0;
7572 }
7573 
7574 /**
7575  * ice_tx_timeout - Respond to a Tx Hang
7576  * @netdev: network interface device structure
7577  * @txqueue: Tx queue
7578  */
7579 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7580 {
7581 	struct ice_netdev_priv *np = netdev_priv(netdev);
7582 	struct ice_tx_ring *tx_ring = NULL;
7583 	struct ice_vsi *vsi = np->vsi;
7584 	struct ice_pf *pf = vsi->back;
7585 	u32 i;
7586 
7587 	pf->tx_timeout_count++;
7588 
7589 	/* Check if PFC is enabled for the TC to which the queue belongs
7590 	 * to. If yes then Tx timeout is not caused by a hung queue, no
7591 	 * need to reset and rebuild
7592 	 */
7593 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7594 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7595 			 txqueue);
7596 		return;
7597 	}
7598 
7599 	/* now that we have an index, find the tx_ring struct */
7600 	ice_for_each_txq(vsi, i)
7601 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7602 			if (txqueue == vsi->tx_rings[i]->q_index) {
7603 				tx_ring = vsi->tx_rings[i];
7604 				break;
7605 			}
7606 
7607 	/* Reset recovery level if enough time has elapsed after last timeout.
7608 	 * Also ensure no new reset action happens before next timeout period.
7609 	 */
7610 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7611 		pf->tx_timeout_recovery_level = 1;
7612 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7613 				       netdev->watchdog_timeo)))
7614 		return;
7615 
7616 	if (tx_ring) {
7617 		struct ice_hw *hw = &pf->hw;
7618 		u32 head, val = 0;
7619 
7620 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7621 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7622 		/* Read interrupt register */
7623 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7624 
7625 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7626 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7627 			    head, tx_ring->next_to_use, val);
7628 	}
7629 
7630 	pf->tx_timeout_last_recovery = jiffies;
7631 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7632 		    pf->tx_timeout_recovery_level, txqueue);
7633 
7634 	switch (pf->tx_timeout_recovery_level) {
7635 	case 1:
7636 		set_bit(ICE_PFR_REQ, pf->state);
7637 		break;
7638 	case 2:
7639 		set_bit(ICE_CORER_REQ, pf->state);
7640 		break;
7641 	case 3:
7642 		set_bit(ICE_GLOBR_REQ, pf->state);
7643 		break;
7644 	default:
7645 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7646 		set_bit(ICE_DOWN, pf->state);
7647 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7648 		set_bit(ICE_SERVICE_DIS, pf->state);
7649 		break;
7650 	}
7651 
7652 	ice_service_task_schedule(pf);
7653 	pf->tx_timeout_recovery_level++;
7654 }
7655 
7656 /**
7657  * ice_setup_tc_cls_flower - flower classifier offloads
7658  * @np: net device to configure
7659  * @filter_dev: device on which filter is added
7660  * @cls_flower: offload data
7661  */
7662 static int
7663 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7664 			struct net_device *filter_dev,
7665 			struct flow_cls_offload *cls_flower)
7666 {
7667 	struct ice_vsi *vsi = np->vsi;
7668 
7669 	if (cls_flower->common.chain_index)
7670 		return -EOPNOTSUPP;
7671 
7672 	switch (cls_flower->command) {
7673 	case FLOW_CLS_REPLACE:
7674 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7675 	case FLOW_CLS_DESTROY:
7676 		return ice_del_cls_flower(vsi, cls_flower);
7677 	default:
7678 		return -EINVAL;
7679 	}
7680 }
7681 
7682 /**
7683  * ice_setup_tc_block_cb - callback handler registered for TC block
7684  * @type: TC SETUP type
7685  * @type_data: TC flower offload data that contains user input
7686  * @cb_priv: netdev private data
7687  */
7688 static int
7689 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7690 {
7691 	struct ice_netdev_priv *np = cb_priv;
7692 
7693 	switch (type) {
7694 	case TC_SETUP_CLSFLOWER:
7695 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7696 					       type_data);
7697 	default:
7698 		return -EOPNOTSUPP;
7699 	}
7700 }
7701 
7702 /**
7703  * ice_validate_mqprio_qopt - Validate TCF input parameters
7704  * @vsi: Pointer to VSI
7705  * @mqprio_qopt: input parameters for mqprio queue configuration
7706  *
7707  * This function validates MQPRIO params, such as qcount (power of 2 wherever
7708  * needed), and make sure user doesn't specify qcount and BW rate limit
7709  * for TCs, which are more than "num_tc"
7710  */
7711 static int
7712 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7713 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
7714 {
7715 	u64 sum_max_rate = 0, sum_min_rate = 0;
7716 	int non_power_of_2_qcount = 0;
7717 	struct ice_pf *pf = vsi->back;
7718 	int max_rss_q_cnt = 0;
7719 	struct device *dev;
7720 	int i, speed;
7721 	u8 num_tc;
7722 
7723 	if (vsi->type != ICE_VSI_PF)
7724 		return -EINVAL;
7725 
7726 	if (mqprio_qopt->qopt.offset[0] != 0 ||
7727 	    mqprio_qopt->qopt.num_tc < 1 ||
7728 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7729 		return -EINVAL;
7730 
7731 	dev = ice_pf_to_dev(pf);
7732 	vsi->ch_rss_size = 0;
7733 	num_tc = mqprio_qopt->qopt.num_tc;
7734 
7735 	for (i = 0; num_tc; i++) {
7736 		int qcount = mqprio_qopt->qopt.count[i];
7737 		u64 max_rate, min_rate, rem;
7738 
7739 		if (!qcount)
7740 			return -EINVAL;
7741 
7742 		if (is_power_of_2(qcount)) {
7743 			if (non_power_of_2_qcount &&
7744 			    qcount > non_power_of_2_qcount) {
7745 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7746 					qcount, non_power_of_2_qcount);
7747 				return -EINVAL;
7748 			}
7749 			if (qcount > max_rss_q_cnt)
7750 				max_rss_q_cnt = qcount;
7751 		} else {
7752 			if (non_power_of_2_qcount &&
7753 			    qcount != non_power_of_2_qcount) {
7754 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7755 					qcount, non_power_of_2_qcount);
7756 				return -EINVAL;
7757 			}
7758 			if (qcount < max_rss_q_cnt) {
7759 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7760 					qcount, max_rss_q_cnt);
7761 				return -EINVAL;
7762 			}
7763 			max_rss_q_cnt = qcount;
7764 			non_power_of_2_qcount = qcount;
7765 		}
7766 
7767 		/* TC command takes input in K/N/Gbps or K/M/Gbit etc but
7768 		 * converts the bandwidth rate limit into Bytes/s when
7769 		 * passing it down to the driver. So convert input bandwidth
7770 		 * from Bytes/s to Kbps
7771 		 */
7772 		max_rate = mqprio_qopt->max_rate[i];
7773 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7774 		sum_max_rate += max_rate;
7775 
7776 		/* min_rate is minimum guaranteed rate and it can't be zero */
7777 		min_rate = mqprio_qopt->min_rate[i];
7778 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7779 		sum_min_rate += min_rate;
7780 
7781 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7782 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7783 				min_rate, ICE_MIN_BW_LIMIT);
7784 			return -EINVAL;
7785 		}
7786 
7787 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7788 		if (rem) {
7789 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7790 				i, ICE_MIN_BW_LIMIT);
7791 			return -EINVAL;
7792 		}
7793 
7794 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7795 		if (rem) {
7796 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7797 				i, ICE_MIN_BW_LIMIT);
7798 			return -EINVAL;
7799 		}
7800 
7801 		/* min_rate can't be more than max_rate, except when max_rate
7802 		 * is zero (implies max_rate sought is max line rate). In such
7803 		 * a case min_rate can be more than max.
7804 		 */
7805 		if (max_rate && min_rate > max_rate) {
7806 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7807 				min_rate, max_rate);
7808 			return -EINVAL;
7809 		}
7810 
7811 		if (i >= mqprio_qopt->qopt.num_tc - 1)
7812 			break;
7813 		if (mqprio_qopt->qopt.offset[i + 1] !=
7814 		    (mqprio_qopt->qopt.offset[i] + qcount))
7815 			return -EINVAL;
7816 	}
7817 	if (vsi->num_rxq <
7818 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7819 		return -EINVAL;
7820 	if (vsi->num_txq <
7821 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7822 		return -EINVAL;
7823 
7824 	speed = ice_get_link_speed_kbps(vsi);
7825 	if (sum_max_rate && sum_max_rate > (u64)speed) {
7826 		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
7827 			sum_max_rate, speed);
7828 		return -EINVAL;
7829 	}
7830 	if (sum_min_rate && sum_min_rate > (u64)speed) {
7831 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
7832 			sum_min_rate, speed);
7833 		return -EINVAL;
7834 	}
7835 
7836 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
7837 	vsi->ch_rss_size = max_rss_q_cnt;
7838 
7839 	return 0;
7840 }
7841 
7842 /**
7843  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
7844  * @pf: ptr to PF device
7845  * @vsi: ptr to VSI
7846  */
7847 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
7848 {
7849 	struct device *dev = ice_pf_to_dev(pf);
7850 	bool added = false;
7851 	struct ice_hw *hw;
7852 	int flow;
7853 
7854 	if (!(vsi->num_gfltr || vsi->num_bfltr))
7855 		return -EINVAL;
7856 
7857 	hw = &pf->hw;
7858 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
7859 		struct ice_fd_hw_prof *prof;
7860 		int tun, status;
7861 		u64 entry_h;
7862 
7863 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
7864 		      hw->fdir_prof[flow]->cnt))
7865 			continue;
7866 
7867 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
7868 			enum ice_flow_priority prio;
7869 			u64 prof_id;
7870 
7871 			/* add this VSI to FDir profile for this flow */
7872 			prio = ICE_FLOW_PRIO_NORMAL;
7873 			prof = hw->fdir_prof[flow];
7874 			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
7875 			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
7876 						    prof->vsi_h[0], vsi->idx,
7877 						    prio, prof->fdir_seg[tun],
7878 						    &entry_h);
7879 			if (status) {
7880 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
7881 					vsi->idx, flow);
7882 				continue;
7883 			}
7884 
7885 			prof->entry_h[prof->cnt][tun] = entry_h;
7886 		}
7887 
7888 		/* store VSI for filter replay and delete */
7889 		prof->vsi_h[prof->cnt] = vsi->idx;
7890 		prof->cnt++;
7891 
7892 		added = true;
7893 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
7894 			flow);
7895 	}
7896 
7897 	if (!added)
7898 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
7899 
7900 	return 0;
7901 }
7902 
7903 /**
7904  * ice_add_channel - add a channel by adding VSI
7905  * @pf: ptr to PF device
7906  * @sw_id: underlying HW switching element ID
7907  * @ch: ptr to channel structure
7908  *
7909  * Add a channel (VSI) using add_vsi and queue_map
7910  */
7911 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
7912 {
7913 	struct device *dev = ice_pf_to_dev(pf);
7914 	struct ice_vsi *vsi;
7915 
7916 	if (ch->type != ICE_VSI_CHNL) {
7917 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
7918 		return -EINVAL;
7919 	}
7920 
7921 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
7922 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
7923 		dev_err(dev, "create chnl VSI failure\n");
7924 		return -EINVAL;
7925 	}
7926 
7927 	ice_add_vsi_to_fdir(pf, vsi);
7928 
7929 	ch->sw_id = sw_id;
7930 	ch->vsi_num = vsi->vsi_num;
7931 	ch->info.mapping_flags = vsi->info.mapping_flags;
7932 	ch->ch_vsi = vsi;
7933 	/* set the back pointer of channel for newly created VSI */
7934 	vsi->ch = ch;
7935 
7936 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
7937 	       sizeof(vsi->info.q_mapping));
7938 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
7939 	       sizeof(vsi->info.tc_mapping));
7940 
7941 	return 0;
7942 }
7943 
7944 /**
7945  * ice_chnl_cfg_res
7946  * @vsi: the VSI being setup
7947  * @ch: ptr to channel structure
7948  *
7949  * Configure channel specific resources such as rings, vector.
7950  */
7951 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
7952 {
7953 	int i;
7954 
7955 	for (i = 0; i < ch->num_txq; i++) {
7956 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
7957 		struct ice_ring_container *rc;
7958 		struct ice_tx_ring *tx_ring;
7959 		struct ice_rx_ring *rx_ring;
7960 
7961 		tx_ring = vsi->tx_rings[ch->base_q + i];
7962 		rx_ring = vsi->rx_rings[ch->base_q + i];
7963 		if (!tx_ring || !rx_ring)
7964 			continue;
7965 
7966 		/* setup ring being channel enabled */
7967 		tx_ring->ch = ch;
7968 		rx_ring->ch = ch;
7969 
7970 		/* following code block sets up vector specific attributes */
7971 		tx_q_vector = tx_ring->q_vector;
7972 		rx_q_vector = rx_ring->q_vector;
7973 		if (!tx_q_vector && !rx_q_vector)
7974 			continue;
7975 
7976 		if (tx_q_vector) {
7977 			tx_q_vector->ch = ch;
7978 			/* setup Tx and Rx ITR setting if DIM is off */
7979 			rc = &tx_q_vector->tx;
7980 			if (!ITR_IS_DYNAMIC(rc))
7981 				ice_write_itr(rc, rc->itr_setting);
7982 		}
7983 		if (rx_q_vector) {
7984 			rx_q_vector->ch = ch;
7985 			/* setup Tx and Rx ITR setting if DIM is off */
7986 			rc = &rx_q_vector->rx;
7987 			if (!ITR_IS_DYNAMIC(rc))
7988 				ice_write_itr(rc, rc->itr_setting);
7989 		}
7990 	}
7991 
7992 	/* it is safe to assume that, if channel has non-zero num_t[r]xq, then
7993 	 * GLINT_ITR register would have written to perform in-context
7994 	 * update, hence perform flush
7995 	 */
7996 	if (ch->num_txq || ch->num_rxq)
7997 		ice_flush(&vsi->back->hw);
7998 }
7999 
8000 /**
8001  * ice_cfg_chnl_all_res - configure channel resources
8002  * @vsi: pte to main_vsi
8003  * @ch: ptr to channel structure
8004  *
8005  * This function configures channel specific resources such as flow-director
8006  * counter index, and other resources such as queues, vectors, ITR settings
8007  */
8008 static void
8009 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8010 {
8011 	/* configure channel (aka ADQ) resources such as queues, vectors,
8012 	 * ITR settings for channel specific vectors and anything else
8013 	 */
8014 	ice_chnl_cfg_res(vsi, ch);
8015 }
8016 
8017 /**
8018  * ice_setup_hw_channel - setup new channel
8019  * @pf: ptr to PF device
8020  * @vsi: the VSI being setup
8021  * @ch: ptr to channel structure
8022  * @sw_id: underlying HW switching element ID
8023  * @type: type of channel to be created (VMDq2/VF)
8024  *
8025  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8026  * and configures Tx rings accordingly
8027  */
8028 static int
8029 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8030 		     struct ice_channel *ch, u16 sw_id, u8 type)
8031 {
8032 	struct device *dev = ice_pf_to_dev(pf);
8033 	int ret;
8034 
8035 	ch->base_q = vsi->next_base_q;
8036 	ch->type = type;
8037 
8038 	ret = ice_add_channel(pf, sw_id, ch);
8039 	if (ret) {
8040 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8041 		return ret;
8042 	}
8043 
8044 	/* configure/setup ADQ specific resources */
8045 	ice_cfg_chnl_all_res(vsi, ch);
8046 
8047 	/* make sure to update the next_base_q so that subsequent channel's
8048 	 * (aka ADQ) VSI queue map is correct
8049 	 */
8050 	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8051 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8052 		ch->num_rxq);
8053 
8054 	return 0;
8055 }
8056 
8057 /**
8058  * ice_setup_channel - setup new channel using uplink element
8059  * @pf: ptr to PF device
8060  * @vsi: the VSI being setup
8061  * @ch: ptr to channel structure
8062  *
8063  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8064  * and uplink switching element
8065  */
8066 static bool
8067 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8068 		  struct ice_channel *ch)
8069 {
8070 	struct device *dev = ice_pf_to_dev(pf);
8071 	u16 sw_id;
8072 	int ret;
8073 
8074 	if (vsi->type != ICE_VSI_PF) {
8075 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8076 		return false;
8077 	}
8078 
8079 	sw_id = pf->first_sw->sw_id;
8080 
8081 	/* create channel (VSI) */
8082 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8083 	if (ret) {
8084 		dev_err(dev, "failed to setup hw_channel\n");
8085 		return false;
8086 	}
8087 	dev_dbg(dev, "successfully created channel()\n");
8088 
8089 	return ch->ch_vsi ? true : false;
8090 }
8091 
8092 /**
8093  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8094  * @vsi: VSI to be configured
8095  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8096  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8097  */
8098 static int
8099 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8100 {
8101 	int err;
8102 
8103 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8104 	if (err)
8105 		return err;
8106 
8107 	return ice_set_max_bw_limit(vsi, max_tx_rate);
8108 }
8109 
8110 /**
8111  * ice_create_q_channel - function to create channel
8112  * @vsi: VSI to be configured
8113  * @ch: ptr to channel (it contains channel specific params)
8114  *
8115  * This function creates channel (VSI) using num_queues specified by user,
8116  * reconfigs RSS if needed.
8117  */
8118 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8119 {
8120 	struct ice_pf *pf = vsi->back;
8121 	struct device *dev;
8122 
8123 	if (!ch)
8124 		return -EINVAL;
8125 
8126 	dev = ice_pf_to_dev(pf);
8127 	if (!ch->num_txq || !ch->num_rxq) {
8128 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8129 		return -EINVAL;
8130 	}
8131 
8132 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8133 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8134 			vsi->cnt_q_avail, ch->num_txq);
8135 		return -EINVAL;
8136 	}
8137 
8138 	if (!ice_setup_channel(pf, vsi, ch)) {
8139 		dev_info(dev, "Failed to setup channel\n");
8140 		return -EINVAL;
8141 	}
8142 	/* configure BW rate limit */
8143 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8144 		int ret;
8145 
8146 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8147 				       ch->min_tx_rate);
8148 		if (ret)
8149 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8150 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8151 		else
8152 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8153 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8154 	}
8155 
8156 	vsi->cnt_q_avail -= ch->num_txq;
8157 
8158 	return 0;
8159 }
8160 
8161 /**
8162  * ice_rem_all_chnl_fltrs - removes all channel filters
8163  * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8164  *
8165  * Remove all advanced switch filters only if they are channel specific
8166  * tc-flower based filter
8167  */
8168 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8169 {
8170 	struct ice_tc_flower_fltr *fltr;
8171 	struct hlist_node *node;
8172 
8173 	/* to remove all channel filters, iterate an ordered list of filters */
8174 	hlist_for_each_entry_safe(fltr, node,
8175 				  &pf->tc_flower_fltr_list,
8176 				  tc_flower_node) {
8177 		struct ice_rule_query_data rule;
8178 		int status;
8179 
8180 		/* for now process only channel specific filters */
8181 		if (!ice_is_chnl_fltr(fltr))
8182 			continue;
8183 
8184 		rule.rid = fltr->rid;
8185 		rule.rule_id = fltr->rule_id;
8186 		rule.vsi_handle = fltr->dest_id;
8187 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8188 		if (status) {
8189 			if (status == -ENOENT)
8190 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8191 					rule.rule_id);
8192 			else
8193 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8194 					status);
8195 		} else if (fltr->dest_vsi) {
8196 			/* update advanced switch filter count */
8197 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8198 				u32 flags = fltr->flags;
8199 
8200 				fltr->dest_vsi->num_chnl_fltr--;
8201 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8202 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8203 					pf->num_dmac_chnl_fltrs--;
8204 			}
8205 		}
8206 
8207 		hlist_del(&fltr->tc_flower_node);
8208 		kfree(fltr);
8209 	}
8210 }
8211 
8212 /**
8213  * ice_remove_q_channels - Remove queue channels for the TCs
8214  * @vsi: VSI to be configured
8215  * @rem_fltr: delete advanced switch filter or not
8216  *
8217  * Remove queue channels for the TCs
8218  */
8219 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8220 {
8221 	struct ice_channel *ch, *ch_tmp;
8222 	struct ice_pf *pf = vsi->back;
8223 	int i;
8224 
8225 	/* remove all tc-flower based filter if they are channel filters only */
8226 	if (rem_fltr)
8227 		ice_rem_all_chnl_fltrs(pf);
8228 
8229 	/* remove ntuple filters since queue configuration is being changed */
8230 	if  (vsi->netdev->features & NETIF_F_NTUPLE) {
8231 		struct ice_hw *hw = &pf->hw;
8232 
8233 		mutex_lock(&hw->fdir_fltr_lock);
8234 		ice_fdir_del_all_fltrs(vsi);
8235 		mutex_unlock(&hw->fdir_fltr_lock);
8236 	}
8237 
8238 	/* perform cleanup for channels if they exist */
8239 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8240 		struct ice_vsi *ch_vsi;
8241 
8242 		list_del(&ch->list);
8243 		ch_vsi = ch->ch_vsi;
8244 		if (!ch_vsi) {
8245 			kfree(ch);
8246 			continue;
8247 		}
8248 
8249 		/* Reset queue contexts */
8250 		for (i = 0; i < ch->num_rxq; i++) {
8251 			struct ice_tx_ring *tx_ring;
8252 			struct ice_rx_ring *rx_ring;
8253 
8254 			tx_ring = vsi->tx_rings[ch->base_q + i];
8255 			rx_ring = vsi->rx_rings[ch->base_q + i];
8256 			if (tx_ring) {
8257 				tx_ring->ch = NULL;
8258 				if (tx_ring->q_vector)
8259 					tx_ring->q_vector->ch = NULL;
8260 			}
8261 			if (rx_ring) {
8262 				rx_ring->ch = NULL;
8263 				if (rx_ring->q_vector)
8264 					rx_ring->q_vector->ch = NULL;
8265 			}
8266 		}
8267 
8268 		/* Release FD resources for the channel VSI */
8269 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8270 
8271 		/* clear the VSI from scheduler tree */
8272 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8273 
8274 		/* Delete VSI from FW */
8275 		ice_vsi_delete(ch->ch_vsi);
8276 
8277 		/* Delete VSI from PF and HW VSI arrays */
8278 		ice_vsi_clear(ch->ch_vsi);
8279 
8280 		/* free the channel */
8281 		kfree(ch);
8282 	}
8283 
8284 	/* clear the channel VSI map which is stored in main VSI */
8285 	ice_for_each_chnl_tc(i)
8286 		vsi->tc_map_vsi[i] = NULL;
8287 
8288 	/* reset main VSI's all TC information */
8289 	vsi->all_enatc = 0;
8290 	vsi->all_numtc = 0;
8291 }
8292 
8293 /**
8294  * ice_rebuild_channels - rebuild channel
8295  * @pf: ptr to PF
8296  *
8297  * Recreate channel VSIs and replay filters
8298  */
8299 static int ice_rebuild_channels(struct ice_pf *pf)
8300 {
8301 	struct device *dev = ice_pf_to_dev(pf);
8302 	struct ice_vsi *main_vsi;
8303 	bool rem_adv_fltr = true;
8304 	struct ice_channel *ch;
8305 	struct ice_vsi *vsi;
8306 	int tc_idx = 1;
8307 	int i, err;
8308 
8309 	main_vsi = ice_get_main_vsi(pf);
8310 	if (!main_vsi)
8311 		return 0;
8312 
8313 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8314 	    main_vsi->old_numtc == 1)
8315 		return 0; /* nothing to be done */
8316 
8317 	/* reconfigure main VSI based on old value of TC and cached values
8318 	 * for MQPRIO opts
8319 	 */
8320 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8321 	if (err) {
8322 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8323 			main_vsi->old_ena_tc, main_vsi->vsi_num);
8324 		return err;
8325 	}
8326 
8327 	/* rebuild ADQ VSIs */
8328 	ice_for_each_vsi(pf, i) {
8329 		enum ice_vsi_type type;
8330 
8331 		vsi = pf->vsi[i];
8332 		if (!vsi || vsi->type != ICE_VSI_CHNL)
8333 			continue;
8334 
8335 		type = vsi->type;
8336 
8337 		/* rebuild ADQ VSI */
8338 		err = ice_vsi_rebuild(vsi, true);
8339 		if (err) {
8340 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8341 				ice_vsi_type_str(type), vsi->idx, err);
8342 			goto cleanup;
8343 		}
8344 
8345 		/* Re-map HW VSI number, using VSI handle that has been
8346 		 * previously validated in ice_replay_vsi() call above
8347 		 */
8348 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8349 
8350 		/* replay filters for the VSI */
8351 		err = ice_replay_vsi(&pf->hw, vsi->idx);
8352 		if (err) {
8353 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8354 				ice_vsi_type_str(type), err, vsi->idx);
8355 			rem_adv_fltr = false;
8356 			goto cleanup;
8357 		}
8358 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8359 			 ice_vsi_type_str(type), vsi->idx);
8360 
8361 		/* store ADQ VSI at correct TC index in main VSI's
8362 		 * map of TC to VSI
8363 		 */
8364 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
8365 	}
8366 
8367 	/* ADQ VSI(s) has been rebuilt successfully, so setup
8368 	 * channel for main VSI's Tx and Rx rings
8369 	 */
8370 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
8371 		struct ice_vsi *ch_vsi;
8372 
8373 		ch_vsi = ch->ch_vsi;
8374 		if (!ch_vsi)
8375 			continue;
8376 
8377 		/* reconfig channel resources */
8378 		ice_cfg_chnl_all_res(main_vsi, ch);
8379 
8380 		/* replay BW rate limit if it is non-zero */
8381 		if (!ch->max_tx_rate && !ch->min_tx_rate)
8382 			continue;
8383 
8384 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8385 				       ch->min_tx_rate);
8386 		if (err)
8387 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8388 				err, ch->max_tx_rate, ch->min_tx_rate,
8389 				ch_vsi->vsi_num);
8390 		else
8391 			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8392 				ch->max_tx_rate, ch->min_tx_rate,
8393 				ch_vsi->vsi_num);
8394 	}
8395 
8396 	/* reconfig RSS for main VSI */
8397 	if (main_vsi->ch_rss_size)
8398 		ice_vsi_cfg_rss_lut_key(main_vsi);
8399 
8400 	return 0;
8401 
8402 cleanup:
8403 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
8404 	return err;
8405 }
8406 
8407 /**
8408  * ice_create_q_channels - Add queue channel for the given TCs
8409  * @vsi: VSI to be configured
8410  *
8411  * Configures queue channel mapping to the given TCs
8412  */
8413 static int ice_create_q_channels(struct ice_vsi *vsi)
8414 {
8415 	struct ice_pf *pf = vsi->back;
8416 	struct ice_channel *ch;
8417 	int ret = 0, i;
8418 
8419 	ice_for_each_chnl_tc(i) {
8420 		if (!(vsi->all_enatc & BIT(i)))
8421 			continue;
8422 
8423 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8424 		if (!ch) {
8425 			ret = -ENOMEM;
8426 			goto err_free;
8427 		}
8428 		INIT_LIST_HEAD(&ch->list);
8429 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8430 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8431 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8432 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8433 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8434 
8435 		/* convert to Kbits/s */
8436 		if (ch->max_tx_rate)
8437 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
8438 						  ICE_BW_KBPS_DIVISOR);
8439 		if (ch->min_tx_rate)
8440 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
8441 						  ICE_BW_KBPS_DIVISOR);
8442 
8443 		ret = ice_create_q_channel(vsi, ch);
8444 		if (ret) {
8445 			dev_err(ice_pf_to_dev(pf),
8446 				"failed creating channel TC:%d\n", i);
8447 			kfree(ch);
8448 			goto err_free;
8449 		}
8450 		list_add_tail(&ch->list, &vsi->ch_list);
8451 		vsi->tc_map_vsi[i] = ch->ch_vsi;
8452 		dev_dbg(ice_pf_to_dev(pf),
8453 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
8454 	}
8455 	return 0;
8456 
8457 err_free:
8458 	ice_remove_q_channels(vsi, false);
8459 
8460 	return ret;
8461 }
8462 
8463 /**
8464  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8465  * @netdev: net device to configure
8466  * @type_data: TC offload data
8467  */
8468 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8469 {
8470 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8471 	struct ice_netdev_priv *np = netdev_priv(netdev);
8472 	struct ice_vsi *vsi = np->vsi;
8473 	struct ice_pf *pf = vsi->back;
8474 	u16 mode, ena_tc_qdisc = 0;
8475 	int cur_txq, cur_rxq;
8476 	u8 hw = 0, num_tcf;
8477 	struct device *dev;
8478 	int ret, i;
8479 
8480 	dev = ice_pf_to_dev(pf);
8481 	num_tcf = mqprio_qopt->qopt.num_tc;
8482 	hw = mqprio_qopt->qopt.hw;
8483 	mode = mqprio_qopt->mode;
8484 	if (!hw) {
8485 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8486 		vsi->ch_rss_size = 0;
8487 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8488 		goto config_tcf;
8489 	}
8490 
8491 	/* Generate queue region map for number of TCF requested */
8492 	for (i = 0; i < num_tcf; i++)
8493 		ena_tc_qdisc |= BIT(i);
8494 
8495 	switch (mode) {
8496 	case TC_MQPRIO_MODE_CHANNEL:
8497 
8498 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8499 		if (ret) {
8500 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8501 				   ret);
8502 			return ret;
8503 		}
8504 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8505 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8506 		/* don't assume state of hw_tc_offload during driver load
8507 		 * and set the flag for TC flower filter if hw_tc_offload
8508 		 * already ON
8509 		 */
8510 		if (vsi->netdev->features & NETIF_F_HW_TC)
8511 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8512 		break;
8513 	default:
8514 		return -EINVAL;
8515 	}
8516 
8517 config_tcf:
8518 
8519 	/* Requesting same TCF configuration as already enabled */
8520 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8521 	    mode != TC_MQPRIO_MODE_CHANNEL)
8522 		return 0;
8523 
8524 	/* Pause VSI queues */
8525 	ice_dis_vsi(vsi, true);
8526 
8527 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8528 		ice_remove_q_channels(vsi, true);
8529 
8530 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8531 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8532 				     num_online_cpus());
8533 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8534 				     num_online_cpus());
8535 	} else {
8536 		/* logic to rebuild VSI, same like ethtool -L */
8537 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8538 
8539 		for (i = 0; i < num_tcf; i++) {
8540 			if (!(ena_tc_qdisc & BIT(i)))
8541 				continue;
8542 
8543 			offset = vsi->mqprio_qopt.qopt.offset[i];
8544 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8545 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8546 		}
8547 		vsi->req_txq = offset + qcount_tx;
8548 		vsi->req_rxq = offset + qcount_rx;
8549 
8550 		/* store away original rss_size info, so that it gets reused
8551 		 * form ice_vsi_rebuild during tc-qdisc delete stage - to
8552 		 * determine, what should be the rss_sizefor main VSI
8553 		 */
8554 		vsi->orig_rss_size = vsi->rss_size;
8555 	}
8556 
8557 	/* save current values of Tx and Rx queues before calling VSI rebuild
8558 	 * for fallback option
8559 	 */
8560 	cur_txq = vsi->num_txq;
8561 	cur_rxq = vsi->num_rxq;
8562 
8563 	/* proceed with rebuild main VSI using correct number of queues */
8564 	ret = ice_vsi_rebuild(vsi, false);
8565 	if (ret) {
8566 		/* fallback to current number of queues */
8567 		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8568 		vsi->req_txq = cur_txq;
8569 		vsi->req_rxq = cur_rxq;
8570 		clear_bit(ICE_RESET_FAILED, pf->state);
8571 		if (ice_vsi_rebuild(vsi, false)) {
8572 			dev_err(dev, "Rebuild of main VSI failed again\n");
8573 			return ret;
8574 		}
8575 	}
8576 
8577 	vsi->all_numtc = num_tcf;
8578 	vsi->all_enatc = ena_tc_qdisc;
8579 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8580 	if (ret) {
8581 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8582 			   vsi->vsi_num);
8583 		goto exit;
8584 	}
8585 
8586 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8587 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8588 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8589 
8590 		/* set TC0 rate limit if specified */
8591 		if (max_tx_rate || min_tx_rate) {
8592 			/* convert to Kbits/s */
8593 			if (max_tx_rate)
8594 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8595 			if (min_tx_rate)
8596 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8597 
8598 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8599 			if (!ret) {
8600 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8601 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8602 			} else {
8603 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8604 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8605 				goto exit;
8606 			}
8607 		}
8608 		ret = ice_create_q_channels(vsi);
8609 		if (ret) {
8610 			netdev_err(netdev, "failed configuring queue channels\n");
8611 			goto exit;
8612 		} else {
8613 			netdev_dbg(netdev, "successfully configured channels\n");
8614 		}
8615 	}
8616 
8617 	if (vsi->ch_rss_size)
8618 		ice_vsi_cfg_rss_lut_key(vsi);
8619 
8620 exit:
8621 	/* if error, reset the all_numtc and all_enatc */
8622 	if (ret) {
8623 		vsi->all_numtc = 0;
8624 		vsi->all_enatc = 0;
8625 	}
8626 	/* resume VSI */
8627 	ice_ena_vsi(vsi, true);
8628 
8629 	return ret;
8630 }
8631 
8632 static LIST_HEAD(ice_block_cb_list);
8633 
8634 static int
8635 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8636 	     void *type_data)
8637 {
8638 	struct ice_netdev_priv *np = netdev_priv(netdev);
8639 	struct ice_pf *pf = np->vsi->back;
8640 	int err;
8641 
8642 	switch (type) {
8643 	case TC_SETUP_BLOCK:
8644 		return flow_block_cb_setup_simple(type_data,
8645 						  &ice_block_cb_list,
8646 						  ice_setup_tc_block_cb,
8647 						  np, np, true);
8648 	case TC_SETUP_QDISC_MQPRIO:
8649 		/* setup traffic classifier for receive side */
8650 		mutex_lock(&pf->tc_mutex);
8651 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8652 		mutex_unlock(&pf->tc_mutex);
8653 		return err;
8654 	default:
8655 		return -EOPNOTSUPP;
8656 	}
8657 	return -EOPNOTSUPP;
8658 }
8659 
8660 static struct ice_indr_block_priv *
8661 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8662 			   struct net_device *netdev)
8663 {
8664 	struct ice_indr_block_priv *cb_priv;
8665 
8666 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8667 		if (!cb_priv->netdev)
8668 			return NULL;
8669 		if (cb_priv->netdev == netdev)
8670 			return cb_priv;
8671 	}
8672 	return NULL;
8673 }
8674 
8675 static int
8676 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8677 			void *indr_priv)
8678 {
8679 	struct ice_indr_block_priv *priv = indr_priv;
8680 	struct ice_netdev_priv *np = priv->np;
8681 
8682 	switch (type) {
8683 	case TC_SETUP_CLSFLOWER:
8684 		return ice_setup_tc_cls_flower(np, priv->netdev,
8685 					       (struct flow_cls_offload *)
8686 					       type_data);
8687 	default:
8688 		return -EOPNOTSUPP;
8689 	}
8690 }
8691 
8692 static int
8693 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8694 			struct ice_netdev_priv *np,
8695 			struct flow_block_offload *f, void *data,
8696 			void (*cleanup)(struct flow_block_cb *block_cb))
8697 {
8698 	struct ice_indr_block_priv *indr_priv;
8699 	struct flow_block_cb *block_cb;
8700 
8701 	if (!ice_is_tunnel_supported(netdev) &&
8702 	    !(is_vlan_dev(netdev) &&
8703 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
8704 		return -EOPNOTSUPP;
8705 
8706 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8707 		return -EOPNOTSUPP;
8708 
8709 	switch (f->command) {
8710 	case FLOW_BLOCK_BIND:
8711 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
8712 		if (indr_priv)
8713 			return -EEXIST;
8714 
8715 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
8716 		if (!indr_priv)
8717 			return -ENOMEM;
8718 
8719 		indr_priv->netdev = netdev;
8720 		indr_priv->np = np;
8721 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
8722 
8723 		block_cb =
8724 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
8725 						 indr_priv, indr_priv,
8726 						 ice_rep_indr_tc_block_unbind,
8727 						 f, netdev, sch, data, np,
8728 						 cleanup);
8729 
8730 		if (IS_ERR(block_cb)) {
8731 			list_del(&indr_priv->list);
8732 			kfree(indr_priv);
8733 			return PTR_ERR(block_cb);
8734 		}
8735 		flow_block_cb_add(block_cb, f);
8736 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
8737 		break;
8738 	case FLOW_BLOCK_UNBIND:
8739 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
8740 		if (!indr_priv)
8741 			return -ENOENT;
8742 
8743 		block_cb = flow_block_cb_lookup(f->block,
8744 						ice_indr_setup_block_cb,
8745 						indr_priv);
8746 		if (!block_cb)
8747 			return -ENOENT;
8748 
8749 		flow_indr_block_cb_remove(block_cb, f);
8750 
8751 		list_del(&block_cb->driver_list);
8752 		break;
8753 	default:
8754 		return -EOPNOTSUPP;
8755 	}
8756 	return 0;
8757 }
8758 
8759 static int
8760 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
8761 		     void *cb_priv, enum tc_setup_type type, void *type_data,
8762 		     void *data,
8763 		     void (*cleanup)(struct flow_block_cb *block_cb))
8764 {
8765 	switch (type) {
8766 	case TC_SETUP_BLOCK:
8767 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
8768 					       data, cleanup);
8769 
8770 	default:
8771 		return -EOPNOTSUPP;
8772 	}
8773 }
8774 
8775 /**
8776  * ice_open - Called when a network interface becomes active
8777  * @netdev: network interface device structure
8778  *
8779  * The open entry point is called when a network interface is made
8780  * active by the system (IFF_UP). At this point all resources needed
8781  * for transmit and receive operations are allocated, the interrupt
8782  * handler is registered with the OS, the netdev watchdog is enabled,
8783  * and the stack is notified that the interface is ready.
8784  *
8785  * Returns 0 on success, negative value on failure
8786  */
8787 int ice_open(struct net_device *netdev)
8788 {
8789 	struct ice_netdev_priv *np = netdev_priv(netdev);
8790 	struct ice_pf *pf = np->vsi->back;
8791 
8792 	if (ice_is_reset_in_progress(pf->state)) {
8793 		netdev_err(netdev, "can't open net device while reset is in progress");
8794 		return -EBUSY;
8795 	}
8796 
8797 	return ice_open_internal(netdev);
8798 }
8799 
8800 /**
8801  * ice_open_internal - Called when a network interface becomes active
8802  * @netdev: network interface device structure
8803  *
8804  * Internal ice_open implementation. Should not be used directly except for ice_open and reset
8805  * handling routine
8806  *
8807  * Returns 0 on success, negative value on failure
8808  */
8809 int ice_open_internal(struct net_device *netdev)
8810 {
8811 	struct ice_netdev_priv *np = netdev_priv(netdev);
8812 	struct ice_vsi *vsi = np->vsi;
8813 	struct ice_pf *pf = vsi->back;
8814 	struct ice_port_info *pi;
8815 	int err;
8816 
8817 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
8818 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
8819 		return -EIO;
8820 	}
8821 
8822 	netif_carrier_off(netdev);
8823 
8824 	pi = vsi->port_info;
8825 	err = ice_update_link_info(pi);
8826 	if (err) {
8827 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
8828 		return err;
8829 	}
8830 
8831 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
8832 
8833 	/* Set PHY if there is media, otherwise, turn off PHY */
8834 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
8835 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8836 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
8837 			err = ice_init_phy_user_cfg(pi);
8838 			if (err) {
8839 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
8840 					   err);
8841 				return err;
8842 			}
8843 		}
8844 
8845 		err = ice_configure_phy(vsi);
8846 		if (err) {
8847 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
8848 				   err);
8849 			return err;
8850 		}
8851 	} else {
8852 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8853 		ice_set_link(vsi, false);
8854 	}
8855 
8856 	err = ice_vsi_open(vsi);
8857 	if (err)
8858 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
8859 			   vsi->vsi_num, vsi->vsw->sw_id);
8860 
8861 	/* Update existing tunnels information */
8862 	udp_tunnel_get_rx_info(netdev);
8863 
8864 	return err;
8865 }
8866 
8867 /**
8868  * ice_stop - Disables a network interface
8869  * @netdev: network interface device structure
8870  *
8871  * The stop entry point is called when an interface is de-activated by the OS,
8872  * and the netdevice enters the DOWN state. The hardware is still under the
8873  * driver's control, but the netdev interface is disabled.
8874  *
8875  * Returns success only - not allowed to fail
8876  */
8877 int ice_stop(struct net_device *netdev)
8878 {
8879 	struct ice_netdev_priv *np = netdev_priv(netdev);
8880 	struct ice_vsi *vsi = np->vsi;
8881 	struct ice_pf *pf = vsi->back;
8882 
8883 	if (ice_is_reset_in_progress(pf->state)) {
8884 		netdev_err(netdev, "can't stop net device while reset is in progress");
8885 		return -EBUSY;
8886 	}
8887 
8888 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
8889 		int link_err = ice_force_phys_link_state(vsi, false);
8890 
8891 		if (link_err) {
8892 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
8893 				   vsi->vsi_num, link_err);
8894 			return -EIO;
8895 		}
8896 	}
8897 
8898 	ice_vsi_close(vsi);
8899 
8900 	return 0;
8901 }
8902 
8903 /**
8904  * ice_features_check - Validate encapsulated packet conforms to limits
8905  * @skb: skb buffer
8906  * @netdev: This port's netdev
8907  * @features: Offload features that the stack believes apply
8908  */
8909 static netdev_features_t
8910 ice_features_check(struct sk_buff *skb,
8911 		   struct net_device __always_unused *netdev,
8912 		   netdev_features_t features)
8913 {
8914 	bool gso = skb_is_gso(skb);
8915 	size_t len;
8916 
8917 	/* No point in doing any of this if neither checksum nor GSO are
8918 	 * being requested for this frame. We can rule out both by just
8919 	 * checking for CHECKSUM_PARTIAL
8920 	 */
8921 	if (skb->ip_summed != CHECKSUM_PARTIAL)
8922 		return features;
8923 
8924 	/* We cannot support GSO if the MSS is going to be less than
8925 	 * 64 bytes. If it is then we need to drop support for GSO.
8926 	 */
8927 	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
8928 		features &= ~NETIF_F_GSO_MASK;
8929 
8930 	len = skb_network_offset(skb);
8931 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
8932 		goto out_rm_features;
8933 
8934 	len = skb_network_header_len(skb);
8935 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8936 		goto out_rm_features;
8937 
8938 	if (skb->encapsulation) {
8939 		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
8940 		 * the case of IPIP frames, the transport header pointer is
8941 		 * after the inner header! So check to make sure that this
8942 		 * is a GRE or UDP_TUNNEL frame before doing that math.
8943 		 */
8944 		if (gso && (skb_shinfo(skb)->gso_type &
8945 			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
8946 			len = skb_inner_network_header(skb) -
8947 			      skb_transport_header(skb);
8948 			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
8949 				goto out_rm_features;
8950 		}
8951 
8952 		len = skb_inner_network_header_len(skb);
8953 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8954 			goto out_rm_features;
8955 	}
8956 
8957 	return features;
8958 out_rm_features:
8959 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
8960 }
8961 
8962 static const struct net_device_ops ice_netdev_safe_mode_ops = {
8963 	.ndo_open = ice_open,
8964 	.ndo_stop = ice_stop,
8965 	.ndo_start_xmit = ice_start_xmit,
8966 	.ndo_set_mac_address = ice_set_mac_address,
8967 	.ndo_validate_addr = eth_validate_addr,
8968 	.ndo_change_mtu = ice_change_mtu,
8969 	.ndo_get_stats64 = ice_get_stats64,
8970 	.ndo_tx_timeout = ice_tx_timeout,
8971 	.ndo_bpf = ice_xdp_safe_mode,
8972 };
8973 
8974 static const struct net_device_ops ice_netdev_ops = {
8975 	.ndo_open = ice_open,
8976 	.ndo_stop = ice_stop,
8977 	.ndo_start_xmit = ice_start_xmit,
8978 	.ndo_select_queue = ice_select_queue,
8979 	.ndo_features_check = ice_features_check,
8980 	.ndo_fix_features = ice_fix_features,
8981 	.ndo_set_rx_mode = ice_set_rx_mode,
8982 	.ndo_set_mac_address = ice_set_mac_address,
8983 	.ndo_validate_addr = eth_validate_addr,
8984 	.ndo_change_mtu = ice_change_mtu,
8985 	.ndo_get_stats64 = ice_get_stats64,
8986 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
8987 	.ndo_eth_ioctl = ice_eth_ioctl,
8988 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
8989 	.ndo_set_vf_mac = ice_set_vf_mac,
8990 	.ndo_get_vf_config = ice_get_vf_cfg,
8991 	.ndo_set_vf_trust = ice_set_vf_trust,
8992 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
8993 	.ndo_set_vf_link_state = ice_set_vf_link_state,
8994 	.ndo_get_vf_stats = ice_get_vf_stats,
8995 	.ndo_set_vf_rate = ice_set_vf_bw,
8996 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
8997 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
8998 	.ndo_setup_tc = ice_setup_tc,
8999 	.ndo_set_features = ice_set_features,
9000 	.ndo_bridge_getlink = ice_bridge_getlink,
9001 	.ndo_bridge_setlink = ice_bridge_setlink,
9002 	.ndo_fdb_add = ice_fdb_add,
9003 	.ndo_fdb_del = ice_fdb_del,
9004 #ifdef CONFIG_RFS_ACCEL
9005 	.ndo_rx_flow_steer = ice_rx_flow_steer,
9006 #endif
9007 	.ndo_tx_timeout = ice_tx_timeout,
9008 	.ndo_bpf = ice_xdp,
9009 	.ndo_xdp_xmit = ice_xdp_xmit,
9010 	.ndo_xsk_wakeup = ice_xsk_wakeup,
9011 	.ndo_get_devlink_port = ice_get_devlink_port,
9012 };
9013