xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_main.c (revision 8b0adbe3e38dbe5aae9edf6f5159ffdca7cfbdf1)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 
17 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
18 static const char ice_driver_string[] = DRV_SUMMARY;
19 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
20 
21 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
22 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
23 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
24 
25 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
26 MODULE_DESCRIPTION(DRV_SUMMARY);
27 MODULE_LICENSE("GPL v2");
28 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
29 
30 static int debug = -1;
31 module_param(debug, int, 0644);
32 #ifndef CONFIG_DYNAMIC_DEBUG
33 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
34 #else
35 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
36 #endif /* !CONFIG_DYNAMIC_DEBUG */
37 
38 static struct workqueue_struct *ice_wq;
39 static const struct net_device_ops ice_netdev_safe_mode_ops;
40 static const struct net_device_ops ice_netdev_ops;
41 static int ice_vsi_open(struct ice_vsi *vsi);
42 
43 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
44 
45 static void ice_vsi_release_all(struct ice_pf *pf);
46 
47 bool netif_is_ice(struct net_device *dev)
48 {
49 	return dev && (dev->netdev_ops == &ice_netdev_ops);
50 }
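
/* Illustrative sketch (not part of the driver): netif_is_ice() lets code
 * outside this file confirm that a netdev is driven by ice before trusting
 * netdev_priv() to return a struct ice_netdev_priv, e.g.:
 *
 *	if (netif_is_ice(netdev)) {
 *		struct ice_netdev_priv *np = netdev_priv(netdev);
 *		struct ice_vsi *vsi = np->vsi;
 *		...
 *	}
 */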
51 
52 /**
53  * ice_get_tx_pending - returns number of Tx descriptors not processed
54  * @ring: the ring of descriptors
55  */
56 static u16 ice_get_tx_pending(struct ice_ring *ring)
57 {
58 	u16 head, tail;
59 
60 	head = ring->next_to_clean;
61 	tail = ring->next_to_use;
62 
63 	if (head != tail)
64 		return (head < tail) ?
65 			tail - head : (tail + ring->count - head);
66 	return 0;
67 }
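
/* Worked example with hypothetical values: if ring->count = 512,
 * next_to_clean (head) = 500 and next_to_use (tail) = 10, then head > tail,
 * so the pending count is tail + count - head = 10 + 512 - 500 = 22
 * descriptors still awaiting cleanup.
 */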
68 
69 /**
70  * ice_check_for_hang_subtask - check for and recover hung queues
71  * @pf: pointer to PF struct
72  */
73 static void ice_check_for_hang_subtask(struct ice_pf *pf)
74 {
75 	struct ice_vsi *vsi = NULL;
76 	struct ice_hw *hw;
77 	unsigned int i;
78 	int packets;
79 	u32 v;
80 
81 	ice_for_each_vsi(pf, v)
82 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
83 			vsi = pf->vsi[v];
84 			break;
85 		}
86 
87 	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
88 		return;
89 
90 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
91 		return;
92 
93 	hw = &vsi->back->hw;
94 
95 	for (i = 0; i < vsi->num_txq; i++) {
96 		struct ice_ring *tx_ring = vsi->tx_rings[i];
97 
98 		if (tx_ring && tx_ring->desc) {
99 			/* If the packet counter has not changed, the queue is
100 			 * likely stalled, so force an interrupt for this
101 			 * queue.
102 			 *
103 			 * prev_pkt would be negative if there was no
104 			 * pending work.
105 			 */
106 			packets = tx_ring->stats.pkts & INT_MAX;
107 			if (tx_ring->tx_stats.prev_pkt == packets) {
108 				/* Trigger sw interrupt to revive the queue */
109 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
110 				continue;
111 			}
112 
113 			/* Memory barrier between read of packet count and call
114 			 * to ice_get_tx_pending()
115 			 */
116 			smp_rmb();
117 			tx_ring->tx_stats.prev_pkt =
118 			    ice_get_tx_pending(tx_ring) ? packets : -1;
119 		}
120 	}
121 }
122 
123 /**
124  * ice_init_mac_fltr - Set initial MAC filters
125  * @pf: board private structure
126  *
127  * Program the initial set of MAC filters for the PF VSI: one filter for the
128  * permanent MAC address and one for the broadcast address. If an error is
129  * encountered, the netdevice will be unregistered.
130  */
131 static int ice_init_mac_fltr(struct ice_pf *pf)
132 {
133 	enum ice_status status;
134 	struct ice_vsi *vsi;
135 	u8 *perm_addr;
136 
137 	vsi = ice_get_main_vsi(pf);
138 	if (!vsi)
139 		return -EINVAL;
140 
141 	perm_addr = vsi->port_info->mac.perm_addr;
142 	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
143 	if (!status)
144 		return 0;
145 
146 	/* We aren't useful with no MAC filters, so unregister if we
147 	 * had an error
148 	 */
149 	if (vsi->netdev->reg_state == NETREG_REGISTERED) {
150 		dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
151 			ice_stat_str(status));
152 		unregister_netdev(vsi->netdev);
153 		free_netdev(vsi->netdev);
154 		vsi->netdev = NULL;
155 	}
156 
157 	return -EIO;
158 }
159 
160 /**
161  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
162  * @netdev: the net device on which the sync is happening
163  * @addr: MAC address to sync
164  *
165  * This is a callback function which is called by the in-kernel device sync
166  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
167  * populates the tmp_sync_list, which is later used by ice_fltr_add_mac_list
168  * to add the MAC filters to the hardware.
169  */
170 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
171 {
172 	struct ice_netdev_priv *np = netdev_priv(netdev);
173 	struct ice_vsi *vsi = np->vsi;
174 
175 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
176 				     ICE_FWD_TO_VSI))
177 		return -EINVAL;
178 
179 	return 0;
180 }
181 
182 /**
183  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
184  * @netdev: the net device on which the unsync is happening
185  * @addr: MAC address to unsync
186  *
187  * This is a callback function which is called by the in-kernel device unsync
188  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
189  * populates the tmp_unsync_list, which is later used by
190  * ice_fltr_remove_mac_list to delete the MAC filters from the hardware.
191  */
192 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
193 {
194 	struct ice_netdev_priv *np = netdev_priv(netdev);
195 	struct ice_vsi *vsi = np->vsi;
196 
197 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
198 				     ICE_FWD_TO_VSI))
199 		return -EINVAL;
200 
201 	return 0;
202 }
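
/* For context: these two callbacks are handed to the in-kernel address list
 * helpers from ice_vsi_sync_fltr() below, roughly as:
 *
 *	netif_addr_lock_bh(netdev);
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	netif_addr_unlock_bh(netdev);
 */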
203 
204 /**
205  * ice_vsi_fltr_changed - check if filter state changed
206  * @vsi: VSI to be checked
207  *
208  * returns true if filter state has changed, false otherwise.
209  */
210 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
211 {
212 	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
213 	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
214 	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
215 }
216 
217 /**
218  * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF VSI
219  * @vsi: the VSI being configured
220  * @promisc_m: mask of promiscuous config bits
221  * @set_promisc: enable or disable promisc flag request
222  * Returns 0 on success, -EIO if the promiscuous configuration fails.
223  */
224 static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
225 {
226 	struct ice_hw *hw = &vsi->back->hw;
227 	enum ice_status status = 0;
228 
229 	if (vsi->type != ICE_VSI_PF)
230 		return 0;
231 
232 	if (vsi->num_vlan > 1) {
233 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
234 						  set_promisc);
235 	} else {
236 		if (set_promisc)
237 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
238 						     0);
239 		else
240 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
241 						       0);
242 	}
243 
244 	if (status)
245 		return -EIO;
246 
247 	return 0;
248 }
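
/* Usage sketch, mirroring the calls in ice_vsi_sync_fltr() below: choose the
 * multicast promiscuous mask based on the VLAN count, then apply it.
 *
 *	u8 promisc_m = vsi->num_vlan > 1 ? ICE_MCAST_VLAN_PROMISC_BITS :
 *					   ICE_MCAST_PROMISC_BITS;
 *	int err = ice_cfg_promisc(vsi, promisc_m, true);
 */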
249 
250 /**
251  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
252  * @vsi: ptr to the VSI
253  *
254  * Push any outstanding VSI filter changes through the AdminQ.
255  */
256 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
257 {
258 	struct device *dev = ice_pf_to_dev(vsi->back);
259 	struct net_device *netdev = vsi->netdev;
260 	bool promisc_forced_on = false;
261 	struct ice_pf *pf = vsi->back;
262 	struct ice_hw *hw = &pf->hw;
263 	enum ice_status status = 0;
264 	u32 changed_flags = 0;
265 	u8 promisc_m;
266 	int err = 0;
267 
268 	if (!vsi->netdev)
269 		return -EINVAL;
270 
271 	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
272 		usleep_range(1000, 2000);
273 
274 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
275 	vsi->current_netdev_flags = vsi->netdev->flags;
276 
277 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
278 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
279 
280 	if (ice_vsi_fltr_changed(vsi)) {
281 		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
282 		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
283 		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
284 
285 		/* grab the netdev's addr_list_lock */
286 		netif_addr_lock_bh(netdev);
287 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
288 			      ice_add_mac_to_unsync_list);
289 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
290 			      ice_add_mac_to_unsync_list);
291 		/* our temp lists are populated. release lock */
292 		netif_addr_unlock_bh(netdev);
293 	}
294 
295 	/* Remove MAC addresses in the unsync list */
296 	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
297 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
298 	if (status) {
299 		netdev_err(netdev, "Failed to delete MAC filters\n");
300 		/* if we failed because of alloc failures, just bail */
301 		if (status == ICE_ERR_NO_MEMORY) {
302 			err = -ENOMEM;
303 			goto out;
304 		}
305 	}
306 
307 	/* Add MAC addresses in the sync list */
308 	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
309 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
310 	/* If a filter was added successfully or already exists, don't treat
311 	 * it as an error: skip the 'if' below and continue processing the
312 	 * rest of the function.
313 	 */
314 	if (status && status != ICE_ERR_ALREADY_EXISTS) {
315 		netdev_err(netdev, "Failed to add MAC filters\n");
316 		/* If there is no more space for new umac filters, VSI
317 		 * should go into promiscuous mode. There should be some
318 		 * space reserved for promiscuous filters.
319 		 */
320 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
321 		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
322 				      vsi->state)) {
323 			promisc_forced_on = true;
324 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
325 				    vsi->vsi_num);
326 		} else {
327 			err = -EIO;
328 			goto out;
329 		}
330 	}
331 	/* check for changes in promiscuous modes */
332 	if (changed_flags & IFF_ALLMULTI) {
333 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
334 			if (vsi->num_vlan > 1)
335 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
336 			else
337 				promisc_m = ICE_MCAST_PROMISC_BITS;
338 
339 			err = ice_cfg_promisc(vsi, promisc_m, true);
340 			if (err) {
341 				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
342 					   vsi->vsi_num);
343 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
344 				goto out_promisc;
345 			}
346 		} else {
347 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
348 			if (vsi->num_vlan > 1)
349 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
350 			else
351 				promisc_m = ICE_MCAST_PROMISC_BITS;
352 
353 			err = ice_cfg_promisc(vsi, promisc_m, false);
354 			if (err) {
355 				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
356 					   vsi->vsi_num);
357 				vsi->current_netdev_flags |= IFF_ALLMULTI;
358 				goto out_promisc;
359 			}
360 		}
361 	}
362 
363 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
364 	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
365 		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
366 		if (vsi->current_netdev_flags & IFF_PROMISC) {
367 			/* Apply Rx filter rule to get traffic from wire */
368 			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
369 				err = ice_set_dflt_vsi(pf->first_sw, vsi);
370 				if (err && err != -EEXIST) {
371 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
372 						   err, vsi->vsi_num);
373 					vsi->current_netdev_flags &=
374 						~IFF_PROMISC;
375 					goto out_promisc;
376 				}
377 				ice_cfg_vlan_pruning(vsi, false, false);
378 			}
379 		} else {
380 			/* Clear Rx filter to remove traffic from wire */
381 			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
382 				err = ice_clear_dflt_vsi(pf->first_sw);
383 				if (err) {
384 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
385 						   err, vsi->vsi_num);
386 					vsi->current_netdev_flags |=
387 						IFF_PROMISC;
388 					goto out_promisc;
389 				}
390 				if (vsi->num_vlan > 1)
391 					ice_cfg_vlan_pruning(vsi, true, false);
392 			}
393 		}
394 	}
395 	goto exit;
396 
397 out_promisc:
398 	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
399 	goto exit;
400 out:
401 	/* if something went wrong then set the changed flag so we try again */
402 	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
403 	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
404 exit:
405 	clear_bit(__ICE_CFG_BUSY, vsi->state);
406 	return err;
407 }
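
/* The open-coded lock at the top of ice_vsi_sync_fltr() is a recurring
 * pattern in this driver: spin on test_and_set_bit() with a short sleep so
 * that only one thread reconfigures the VSI at a time, then clear the bit
 * on every exit path.
 *
 *	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
 *		usleep_range(1000, 2000);
 *	...
 *	clear_bit(__ICE_CFG_BUSY, vsi->state);
 */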
408 
409 /**
410  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
411  * @pf: board private structure
412  */
413 static void ice_sync_fltr_subtask(struct ice_pf *pf)
414 {
415 	int v;
416 
417 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
418 		return;
419 
420 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
421 
422 	ice_for_each_vsi(pf, v)
423 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
424 		    ice_vsi_sync_fltr(pf->vsi[v])) {
425 			/* come back and try again later */
426 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
427 			break;
428 		}
429 }
430 
431 /**
432  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
433  * @pf: the PF
434  * @locked: is the rtnl_lock already held
435  */
436 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
437 {
438 	int node;
439 	int v;
440 
441 	ice_for_each_vsi(pf, v)
442 		if (pf->vsi[v])
443 			ice_dis_vsi(pf->vsi[v], locked);
444 
445 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
446 		pf->pf_agg_node[node].num_vsis = 0;
447 
448 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
449 		pf->vf_agg_node[node].num_vsis = 0;
450 }
452 
453 /**
454  * ice_prepare_for_reset - prep for the core to reset
455  * @pf: board private structure
456  *
457  * Inform or close all dependent features in prep for reset.
458  */
459 static void
460 ice_prepare_for_reset(struct ice_pf *pf)
461 {
462 	struct ice_hw *hw = &pf->hw;
463 	unsigned int i;
464 
465 	/* already prepared for reset */
466 	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
467 		return;
468 
469 	/* Notify VFs of impending reset */
470 	if (ice_check_sq_alive(hw, &hw->mailboxq))
471 		ice_vc_notify_reset(pf);
472 
473 	/* Disable VFs until reset is completed */
474 	ice_for_each_vf(pf, i)
475 		ice_set_vf_state_qs_dis(&pf->vf[i]);
476 
477 	/* clear SW filtering DB */
478 	ice_clear_hw_tbls(hw);
479 	/* disable the VSIs and their queues that are not already DOWN */
480 	ice_pf_dis_all_vsi(pf, false);
481 
482 	if (hw->port_info)
483 		ice_sched_clear_port(hw->port_info);
484 
485 	ice_shutdown_all_ctrlq(hw);
486 
487 	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
488 }
489 
490 /**
491  * ice_do_reset - Initiate one of many types of resets
492  * @pf: board private structure
493  * @reset_type: reset type requested; the matching reset request bit in
494  * pf->state is expected to be set before this function is called
495  */
496 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
497 {
498 	struct device *dev = ice_pf_to_dev(pf);
499 	struct ice_hw *hw = &pf->hw;
500 
501 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
502 
503 	ice_prepare_for_reset(pf);
504 
505 	/* trigger the reset */
506 	if (ice_reset(hw, reset_type)) {
507 		dev_err(dev, "reset %d failed\n", reset_type);
508 		set_bit(__ICE_RESET_FAILED, pf->state);
509 		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
510 		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
511 		clear_bit(__ICE_PFR_REQ, pf->state);
512 		clear_bit(__ICE_CORER_REQ, pf->state);
513 		clear_bit(__ICE_GLOBR_REQ, pf->state);
514 		return;
515 	}
516 
517 	/* PFR is a bit of a special case because it doesn't result in an OICR
518 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
519 	 * associated state bits.
520 	 */
521 	if (reset_type == ICE_RESET_PFR) {
522 		pf->pfr_count++;
523 		ice_rebuild(pf, reset_type);
524 		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
525 		clear_bit(__ICE_PFR_REQ, pf->state);
526 		ice_reset_all_vfs(pf, true);
527 	}
528 }
529 
530 /**
531  * ice_reset_subtask - Set up for resetting the device and driver
532  * @pf: board private structure
533  */
534 static void ice_reset_subtask(struct ice_pf *pf)
535 {
536 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
537 
538 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
539 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
540 	 * of reset is pending and sets bits in pf->state indicating the reset
541  * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set,
542 	 * prepare for pending reset if not already (for PF software-initiated
543 	 * global resets the software should already be prepared for it as
544 	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
545 	 * by firmware or software on other PFs, that bit is not set so prepare
546 	 * for the reset now), poll for reset done, rebuild and return.
547 	 */
548 	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
549 		/* Perform the largest reset requested */
550 		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
551 			reset_type = ICE_RESET_CORER;
552 		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
553 			reset_type = ICE_RESET_GLOBR;
554 		if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
555 			reset_type = ICE_RESET_EMPR;
556 		/* return if no valid reset type requested */
557 		if (reset_type == ICE_RESET_INVAL)
558 			return;
559 		ice_prepare_for_reset(pf);
560 
561 		/* make sure we are ready to rebuild */
562 		if (ice_check_reset(&pf->hw)) {
563 			set_bit(__ICE_RESET_FAILED, pf->state);
564 		} else {
565 			/* done with reset. start rebuild */
566 			pf->hw.reset_ongoing = false;
567 			ice_rebuild(pf, reset_type);
568 			/* clear bit to resume normal operations, but
569 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
570 			 */
571 			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
572 			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
573 			clear_bit(__ICE_PFR_REQ, pf->state);
574 			clear_bit(__ICE_CORER_REQ, pf->state);
575 			clear_bit(__ICE_GLOBR_REQ, pf->state);
576 			ice_reset_all_vfs(pf, true);
577 		}
578 
579 		return;
580 	}
581 
582 	/* No pending resets to finish processing. Check for new resets */
583 	if (test_bit(__ICE_PFR_REQ, pf->state))
584 		reset_type = ICE_RESET_PFR;
585 	if (test_bit(__ICE_CORER_REQ, pf->state))
586 		reset_type = ICE_RESET_CORER;
587 	if (test_bit(__ICE_GLOBR_REQ, pf->state))
588 		reset_type = ICE_RESET_GLOBR;
589 	/* If no valid reset type requested just return */
590 	if (reset_type == ICE_RESET_INVAL)
591 		return;
592 
593 	/* reset if not already down or busy */
594 	if (!test_bit(__ICE_DOWN, pf->state) &&
595 	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
596 		ice_do_reset(pf, reset_type);
597 	}
598 }
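
/* Sketch of how other driver paths request a reset that this subtask then
 * services (see e.g. the Tx timeout handler later in this file): set the
 * matching request bit and kick the service task.
 *
 *	set_bit(__ICE_PFR_REQ, pf->state);
 *	ice_service_task_schedule(pf);
 */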
599 
600 /**
601  * ice_print_topo_conflict - print topology conflict message
602  * @vsi: the VSI whose topology status is being checked
603  */
604 static void ice_print_topo_conflict(struct ice_vsi *vsi)
605 {
606 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
607 	case ICE_AQ_LINK_TOPO_CONFLICT:
608 	case ICE_AQ_LINK_MEDIA_CONFLICT:
609 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
610 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
611 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
612 		netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
613 		break;
614 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
615 		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
616 		break;
617 	default:
618 		break;
619 	}
620 }
621 
622 /**
623  * ice_print_link_msg - print link up or down message
624  * @vsi: the VSI whose link status is being queried
625  * @isup: boolean for if the link is now up or down
626  */
627 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
628 {
629 	struct ice_aqc_get_phy_caps_data *caps;
630 	const char *an_advertised;
631 	enum ice_status status;
632 	const char *fec_req;
633 	const char *speed;
634 	const char *fec;
635 	const char *fc;
636 	const char *an;
637 
638 	if (!vsi)
639 		return;
640 
641 	if (vsi->current_isup == isup)
642 		return;
643 
644 	vsi->current_isup = isup;
645 
646 	if (!isup) {
647 		netdev_info(vsi->netdev, "NIC Link is Down\n");
648 		return;
649 	}
650 
651 	switch (vsi->port_info->phy.link_info.link_speed) {
652 	case ICE_AQ_LINK_SPEED_100GB:
653 		speed = "100 G";
654 		break;
655 	case ICE_AQ_LINK_SPEED_50GB:
656 		speed = "50 G";
657 		break;
658 	case ICE_AQ_LINK_SPEED_40GB:
659 		speed = "40 G";
660 		break;
661 	case ICE_AQ_LINK_SPEED_25GB:
662 		speed = "25 G";
663 		break;
664 	case ICE_AQ_LINK_SPEED_20GB:
665 		speed = "20 G";
666 		break;
667 	case ICE_AQ_LINK_SPEED_10GB:
668 		speed = "10 G";
669 		break;
670 	case ICE_AQ_LINK_SPEED_5GB:
671 		speed = "5 G";
672 		break;
673 	case ICE_AQ_LINK_SPEED_2500MB:
674 		speed = "2.5 G";
675 		break;
676 	case ICE_AQ_LINK_SPEED_1000MB:
677 		speed = "1 G";
678 		break;
679 	case ICE_AQ_LINK_SPEED_100MB:
680 		speed = "100 M";
681 		break;
682 	default:
683 		speed = "Unknown ";
684 		break;
685 	}
686 
687 	switch (vsi->port_info->fc.current_mode) {
688 	case ICE_FC_FULL:
689 		fc = "Rx/Tx";
690 		break;
691 	case ICE_FC_TX_PAUSE:
692 		fc = "Tx";
693 		break;
694 	case ICE_FC_RX_PAUSE:
695 		fc = "Rx";
696 		break;
697 	case ICE_FC_NONE:
698 		fc = "None";
699 		break;
700 	default:
701 		fc = "Unknown";
702 		break;
703 	}
704 
705 	/* Get FEC mode based on negotiated link info */
706 	switch (vsi->port_info->phy.link_info.fec_info) {
707 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
708 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
709 		fec = "RS-FEC";
710 		break;
711 	case ICE_AQ_LINK_25G_KR_FEC_EN:
712 		fec = "FC-FEC/BASE-R";
713 		break;
714 	default:
715 		fec = "NONE";
716 		break;
717 	}
718 
719 	/* check if autoneg completed; may be false if autoneg is not supported */
720 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
721 		an = "True";
722 	else
723 		an = "False";
724 
725 	/* Get FEC mode requested based on PHY caps last SW configuration */
726 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
727 	if (!caps) {
728 		fec_req = "Unknown";
729 		an_advertised = "Unknown";
730 		goto done;
731 	}
732 
733 	status = ice_aq_get_phy_caps(vsi->port_info, false,
734 				     ICE_AQC_REPORT_SW_CFG, caps, NULL);
735 	if (status)
736 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
737 
738 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
739 
740 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
741 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
742 		fec_req = "RS-FEC";
743 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
744 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
745 		fec_req = "FC-FEC/BASE-R";
746 	else
747 		fec_req = "NONE";
748 
749 	kfree(caps);
750 
751 done:
752 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
753 		    speed, fec_req, fec, an_advertised, an, fc);
754 	ice_print_topo_conflict(vsi);
755 }
756 
757 /**
758  * ice_vsi_link_event - update the VSI's netdev
759  * @vsi: the VSI on which the link event occurred
760  * @link_up: whether or not the VSI needs to be set up or down
761  */
762 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
763 {
764 	if (!vsi)
765 		return;
766 
767 	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
768 		return;
769 
770 	if (vsi->type == ICE_VSI_PF) {
771 		if (link_up == netif_carrier_ok(vsi->netdev))
772 			return;
773 
774 		if (link_up) {
775 			netif_carrier_on(vsi->netdev);
776 			netif_tx_wake_all_queues(vsi->netdev);
777 		} else {
778 			netif_carrier_off(vsi->netdev);
779 			netif_tx_stop_all_queues(vsi->netdev);
780 		}
781 	}
782 }
783 
784 /**
785  * ice_set_dflt_mib - send a default config MIB to the FW
786  * @pf: private PF struct
787  *
788  * This function sends a default configuration MIB to the FW.
789  *
790  * If this function errors out at any point, the driver is still able to
791  * function. The main impact is that LFC may not operate as expected.
792  * Therefore an error in this function is logged only at DBG level, and the
793  * driver continues on with its rebuild/re-enable.
794  */
795 static void ice_set_dflt_mib(struct ice_pf *pf)
796 {
797 	struct device *dev = ice_pf_to_dev(pf);
798 	u8 mib_type, *buf, *lldpmib = NULL;
799 	u16 len, typelen, offset = 0;
800 	struct ice_lldp_org_tlv *tlv;
801 	struct ice_hw *hw = &pf->hw;
802 	u32 ouisubtype;
803 
804 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
805 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
806 	if (!lldpmib) {
807 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
808 			__func__);
809 		return;
810 	}
811 
812 	/* Add ETS CFG TLV */
813 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
814 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
815 		   ICE_IEEE_ETS_TLV_LEN);
816 	tlv->typelen = htons(typelen);
817 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
818 		      ICE_IEEE_SUBTYPE_ETS_CFG);
819 	tlv->ouisubtype = htonl(ouisubtype);
820 
821 	buf = tlv->tlvinfo;
822 	buf[0] = 0;
823 
824 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
825 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
826 	 * Octets 13 - 20 are TSA values - leave as zeros
827 	 */
828 	buf[5] = 0x64;
829 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
830 	offset += len + 2;
831 	tlv = (struct ice_lldp_org_tlv *)
832 		((char *)tlv + sizeof(tlv->typelen) + len);
833 
834 	/* Add ETS REC TLV */
835 	buf = tlv->tlvinfo;
836 	tlv->typelen = htons(typelen);
837 
838 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
839 		      ICE_IEEE_SUBTYPE_ETS_REC);
840 	tlv->ouisubtype = htonl(ouisubtype);
841 
842 	/* First octet of buf is reserved
843 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
844 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
845 	 * Octets 13 - 20 are TSA value - leave as zeros
846 	 */
847 	buf[5] = 0x64;
848 	offset += len + 2;
849 	tlv = (struct ice_lldp_org_tlv *)
850 		((char *)tlv + sizeof(tlv->typelen) + len);
851 
852 	/* Add PFC CFG TLV */
853 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
854 		   ICE_IEEE_PFC_TLV_LEN);
855 	tlv->typelen = htons(typelen);
856 
857 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
858 		      ICE_IEEE_SUBTYPE_PFC_CFG);
859 	tlv->ouisubtype = htonl(ouisubtype);
860 
861 	/* buf[0] sets the PFC capability; the enable bitmap (octet 1) stays zero - PFC disabled */
862 	buf[0] = 0x08;
863 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
864 	offset += len + 2;
865 
866 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
867 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
868 
869 	kfree(lldpmib);
870 }
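
/* Worked example of the typelen encoding used above: for the ETS CFG TLV,
 * (ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_IEEE_ETS_TLV_LEN packs the
 * 7-bit LLDP TLV type and 9-bit length into a single u16, stored big-endian
 * via htons(); the length is later recovered with
 * (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S.
 */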
871 
872 /**
873  * ice_link_event - process the link event
874  * @pf: PF that the link event is associated with
875  * @pi: port_info for the port that the link event is associated with
876  * @link_up: true if the physical link is up and false if it is down
877  * @link_speed: current link speed received from the link event
878  *
879  * Returns 0 on success and negative on failure
880  */
881 static int
882 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
883 	       u16 link_speed)
884 {
885 	struct device *dev = ice_pf_to_dev(pf);
886 	struct ice_phy_info *phy_info;
887 	struct ice_vsi *vsi;
888 	u16 old_link_speed;
889 	bool old_link;
890 	int result;
891 
892 	phy_info = &pi->phy;
893 	phy_info->link_info_old = phy_info->link_info;
894 
895 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
896 	old_link_speed = phy_info->link_info_old.link_speed;
897 
898 	/* update the link info structures and re-enable link events,
899 	 * don't bail on failure since other bookkeeping is still needed
900 	 */
901 	result = ice_update_link_info(pi);
902 	if (result)
903 		dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
904 			pi->lport);
905 
906 	/* Check if the link state is up after updating link info, and treat
907 	 * this event as an UP event since the link is actually UP now.
908 	 */
909 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
910 		link_up = true;
911 
912 	vsi = ice_get_main_vsi(pf);
913 	if (!vsi || !vsi->port_info)
914 		return -EINVAL;
915 
916 	/* turn off PHY if media was removed */
917 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
918 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
919 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
920 
921 		result = ice_aq_set_link_restart_an(pi, false, NULL);
922 		if (result) {
923 			dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
924 				vsi->vsi_num, result);
925 			return result;
926 		}
927 	}
928 
929 	/* if the link state and speed are unchanged, there is nothing more to do */
930 	if (link_up == old_link && link_speed == old_link_speed)
931 		return result;
932 
933 	if (ice_is_dcb_active(pf)) {
934 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
935 			ice_dcb_rebuild(pf);
936 	} else {
937 		if (link_up)
938 			ice_set_dflt_mib(pf);
939 	}
940 	ice_vsi_link_event(vsi, link_up);
941 	ice_print_link_msg(vsi, link_up);
942 
943 	ice_vc_notify_link_state(pf);
944 
945 	return result;
946 }
947 
948 /**
949  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
950  * @pf: board private structure
951  */
952 static void ice_watchdog_subtask(struct ice_pf *pf)
953 {
954 	int i;
955 
956 	/* if interface is down do nothing */
957 	if (test_bit(__ICE_DOWN, pf->state) ||
958 	    test_bit(__ICE_CFG_BUSY, pf->state))
959 		return;
960 
961 	/* make sure we don't do these things too often */
962 	if (time_before(jiffies,
963 			pf->serv_tmr_prev + pf->serv_tmr_period))
964 		return;
965 
966 	pf->serv_tmr_prev = jiffies;
967 
968 	/* Update the stats for active netdevs so the network stack
969 	 * can look at updated numbers whenever it cares to
970 	 */
971 	ice_update_pf_stats(pf);
972 	ice_for_each_vsi(pf, i)
973 		if (pf->vsi[i] && pf->vsi[i]->netdev)
974 			ice_update_vsi_stats(pf->vsi[i]);
975 }
976 
977 /**
978  * ice_init_link_events - enable/initialize link events
979  * @pi: pointer to the port_info instance
980  *
981  * Returns -EIO on failure, 0 on success
982  */
983 static int ice_init_link_events(struct ice_port_info *pi)
984 {
985 	u16 mask;
986 
987 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
988 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
989 
990 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
991 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
992 			pi->lport);
993 		return -EIO;
994 	}
995 
996 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
997 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
998 			pi->lport);
999 		return -EIO;
1000 	}
1001 
1002 	return 0;
1003 }
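
/* Note on the inverted mask above: as the inversion implies, bits left set
 * in the event mask suppress their events, so masking out everything except
 * UPDOWN, MEDIA_NA and MODULE_QUAL_FAIL leaves exactly those three link
 * events enabled.
 */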
1004 
1005 /**
1006  * ice_handle_link_event - handle link event via ARQ
1007  * @pf: PF that the link event is associated with
1008  * @event: event structure containing link status info
1009  */
1010 static int
1011 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1012 {
1013 	struct ice_aqc_get_link_status_data *link_data;
1014 	struct ice_port_info *port_info;
1015 	int status;
1016 
1017 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1018 	port_info = pf->hw.port_info;
1019 	if (!port_info)
1020 		return -EINVAL;
1021 
1022 	status = ice_link_event(pf, port_info,
1023 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1024 				le16_to_cpu(link_data->link_speed));
1025 	if (status)
1026 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1027 			status);
1028 
1029 	return status;
1030 }
1031 
1032 enum ice_aq_task_state {
1033 	ICE_AQ_TASK_WAITING = 0,
1034 	ICE_AQ_TASK_COMPLETE,
1035 	ICE_AQ_TASK_CANCELED,
1036 };
1037 
1038 struct ice_aq_task {
1039 	struct hlist_node entry;
1040 
1041 	u16 opcode;
1042 	struct ice_rq_event_info *event;
1043 	enum ice_aq_task_state state;
1044 };
1045 
1046 /**
1047  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1048  * @pf: pointer to the PF private structure
1049  * @opcode: the opcode to wait for
1050  * @timeout: how long to wait, in jiffies
1051  * @event: storage for the event info
1052  *
1053  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1054  * current thread will be put to sleep until the specified event occurs or
1055  * until the given timeout is reached.
1056  *
1057  * To obtain only the descriptor contents, pass an event without an allocated
1058  * msg_buf. If the complete data buffer is desired, allocate the
1059  * event->msg_buf with enough space ahead of time.
1060  *
1061  * Returns: zero on success, or a negative error code on failure.
1062  */
1063 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1064 			  struct ice_rq_event_info *event)
1065 {
1066 	struct device *dev = ice_pf_to_dev(pf);
1067 	struct ice_aq_task *task;
1068 	unsigned long start;
1069 	long ret;
1070 	int err;
1071 
1072 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1073 	if (!task)
1074 		return -ENOMEM;
1075 
1076 	INIT_HLIST_NODE(&task->entry);
1077 	task->opcode = opcode;
1078 	task->event = event;
1079 	task->state = ICE_AQ_TASK_WAITING;
1080 
1081 	spin_lock_bh(&pf->aq_wait_lock);
1082 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1083 	spin_unlock_bh(&pf->aq_wait_lock);
1084 
1085 	start = jiffies;
1086 
1087 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1088 					       timeout);
1089 	switch (task->state) {
1090 	case ICE_AQ_TASK_WAITING:
1091 		err = ret < 0 ? ret : -ETIMEDOUT;
1092 		break;
1093 	case ICE_AQ_TASK_CANCELED:
1094 		err = ret < 0 ? ret : -ECANCELED;
1095 		break;
1096 	case ICE_AQ_TASK_COMPLETE:
1097 		err = ret < 0 ? ret : 0;
1098 		break;
1099 	default:
1100 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1101 		err = -EINVAL;
1102 		break;
1103 	}
1104 
1105 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1106 		jiffies_to_msecs(jiffies - start),
1107 		jiffies_to_msecs(timeout),
1108 		opcode);
1109 
1110 	spin_lock_bh(&pf->aq_wait_lock);
1111 	hlist_del(&task->entry);
1112 	spin_unlock_bh(&pf->aq_wait_lock);
1113 	kfree(task);
1114 
1115 	return err;
1116 }
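
/* Usage sketch (the opcode and timeout here are illustrative): wait up to
 * one second for firmware to post a specific completion on the ARQ.
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_lldp_set_mib_change, HZ,
 *				    &event);
 *	if (err == -ETIMEDOUT)
 *		dev_dbg(ice_pf_to_dev(pf), "timed out waiting for firmware\n");
 */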
1117 
1118 /**
1119  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1120  * @pf: pointer to the PF private structure
1121  * @opcode: the opcode of the event
1122  * @event: the event to check
1123  *
1124  * Loops over the current list of pending threads waiting for an AdminQ event.
1125  * For each matching task, copy the contents of the event into the task
1126  * structure and wake up the thread.
1127  *
1128  * If multiple threads wait for the same opcode, they will all be woken up.
1129  *
1130  * Note that event->msg_buf will only be duplicated if the event has a buffer
1131  * with enough space already allocated. Otherwise, only the descriptor and
1132  * message length will be copied.
1133  *
1134  * The wait queue is woken only if at least one matching task was found.
1135  */
1136 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1137 				struct ice_rq_event_info *event)
1138 {
1139 	struct ice_aq_task *task;
1140 	bool found = false;
1141 
1142 	spin_lock_bh(&pf->aq_wait_lock);
1143 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1144 		if (task->state || task->opcode != opcode)
1145 			continue;
1146 
1147 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1148 		task->event->msg_len = event->msg_len;
1149 
1150 		/* Only copy the data buffer if a large enough destination was set */
1151 		if (task->event->msg_buf &&
1152 		    task->event->buf_len > event->buf_len) {
1153 			memcpy(task->event->msg_buf, event->msg_buf,
1154 			       event->buf_len);
1155 			task->event->buf_len = event->buf_len;
1156 		}
1157 
1158 		task->state = ICE_AQ_TASK_COMPLETE;
1159 		found = true;
1160 	}
1161 	spin_unlock_bh(&pf->aq_wait_lock);
1162 
1163 	if (found)
1164 		wake_up(&pf->aq_wait_queue);
1165 }
1166 
1167 /**
1168  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1169  * @pf: the PF private structure
1170  *
1171  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1172  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1173  */
1174 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1175 {
1176 	struct ice_aq_task *task;
1177 
1178 	spin_lock_bh(&pf->aq_wait_lock);
1179 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1180 		task->state = ICE_AQ_TASK_CANCELED;
1181 	spin_unlock_bh(&pf->aq_wait_lock);
1182 
1183 	wake_up(&pf->aq_wait_queue);
1184 }
1185 
1186 /**
1187  * __ice_clean_ctrlq - helper function to clean controlq rings
1188  * @pf: ptr to struct ice_pf
1189  * @q_type: specific Control queue type
1190  */
1191 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1192 {
1193 	struct device *dev = ice_pf_to_dev(pf);
1194 	struct ice_rq_event_info event;
1195 	struct ice_hw *hw = &pf->hw;
1196 	struct ice_ctl_q_info *cq;
1197 	u16 pending, i = 0;
1198 	const char *qtype;
1199 	u32 oldval, val;
1200 
1201 	/* Do not clean control queue if/when PF reset fails */
1202 	if (test_bit(__ICE_RESET_FAILED, pf->state))
1203 		return 0;
1204 
1205 	switch (q_type) {
1206 	case ICE_CTL_Q_ADMIN:
1207 		cq = &hw->adminq;
1208 		qtype = "Admin";
1209 		break;
1210 	case ICE_CTL_Q_MAILBOX:
1211 		cq = &hw->mailboxq;
1212 		qtype = "Mailbox";
1213 		break;
1214 	default:
1215 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1216 		return 0;
1217 	}
1218 
1219 	/* check for error indications - PF_xx_AxQLEN register layout for
1220 	 * FW/MBX/SB is identical, so just use the defines for PF_FW_AxQLEN.
1221 	 */
1222 	val = rd32(hw, cq->rq.len);
1223 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1224 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1225 		oldval = val;
1226 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1227 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1228 				qtype);
1229 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1230 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1231 				qtype);
1232 		}
1233 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1234 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1235 				qtype);
1236 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1237 			 PF_FW_ARQLEN_ARQCRIT_M);
1238 		if (oldval != val)
1239 			wr32(hw, cq->rq.len, val);
1240 	}
1241 
1242 	val = rd32(hw, cq->sq.len);
1243 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1244 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1245 		oldval = val;
1246 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1247 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1248 				qtype);
1249 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1250 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1251 				qtype);
1252 		}
1253 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1254 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1255 				qtype);
1256 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1257 			 PF_FW_ATQLEN_ATQCRIT_M);
1258 		if (oldval != val)
1259 			wr32(hw, cq->sq.len, val);
1260 	}
1261 
1262 	event.buf_len = cq->rq_buf_size;
1263 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1264 	if (!event.msg_buf)
1265 		return 0;
1266 
1267 	do {
1268 		enum ice_status ret;
1269 		u16 opcode;
1270 
1271 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1272 		if (ret == ICE_ERR_AQ_NO_WORK)
1273 			break;
1274 		if (ret) {
1275 			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1276 				ice_stat_str(ret));
1277 			break;
1278 		}
1279 
1280 		opcode = le16_to_cpu(event.desc.opcode);
1281 
1282 		/* Notify any thread that might be waiting for this event */
1283 		ice_aq_check_events(pf, opcode, &event);
1284 
1285 		switch (opcode) {
1286 		case ice_aqc_opc_get_link_status:
1287 			if (ice_handle_link_event(pf, &event))
1288 				dev_err(dev, "Could not handle link event\n");
1289 			break;
1290 		case ice_aqc_opc_event_lan_overflow:
1291 			ice_vf_lan_overflow_event(pf, &event);
1292 			break;
1293 		case ice_mbx_opc_send_msg_to_pf:
1294 			ice_vc_process_vf_msg(pf, &event);
1295 			break;
1296 		case ice_aqc_opc_fw_logging:
1297 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1298 			break;
1299 		case ice_aqc_opc_lldp_set_mib_change:
1300 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1301 			break;
1302 		default:
1303 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1304 				qtype, opcode);
1305 			break;
1306 		}
1307 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1308 
1309 	kfree(event.msg_buf);
1310 
1311 	return pending && (i == ICE_DFLT_IRQ_WORK);
1312 }
1313 
1314 /**
1315  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1316  * @hw: pointer to hardware info
1317  * @cq: control queue information
1318  *
1319  * returns true if there are pending messages in a queue, false if there aren't
1320  */
1321 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1322 {
1323 	u16 ntu;
1324 
1325 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1326 	return cq->rq.next_to_clean != ntu;
1327 }
1328 
1329 /**
1330  * ice_clean_adminq_subtask - clean the AdminQ rings
1331  * @pf: board private structure
1332  */
1333 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1334 {
1335 	struct ice_hw *hw = &pf->hw;
1336 
1337 	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
1338 		return;
1339 
1340 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1341 		return;
1342 
1343 	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
1344 
1345 	/* There might be a situation where new messages arrive to a control
1346 	 * queue between processing the last message and clearing the
1347 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1348 	 * ice_ctrlq_pending) and process new messages if any.
1349 	 */
1350 	if (ice_ctrlq_pending(hw, &hw->adminq))
1351 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1352 
1353 	ice_flush(hw);
1354 }
1355 
1356 /**
1357  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1358  * @pf: board private structure
1359  */
1360 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1361 {
1362 	struct ice_hw *hw = &pf->hw;
1363 
1364 	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1365 		return;
1366 
1367 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1368 		return;
1369 
1370 	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1371 
1372 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1373 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1374 
1375 	ice_flush(hw);
1376 }
1377 
1378 /**
1379  * ice_service_task_schedule - schedule the service task to wake up
1380  * @pf: board private structure
1381  *
1382  * If not already scheduled, this puts the task into the work queue.
1383  */
1384 void ice_service_task_schedule(struct ice_pf *pf)
1385 {
1386 	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
1387 	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
1388 	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
1389 		queue_work(ice_wq, &pf->serv_task);
1390 }
1391 
1392 /**
1393  * ice_service_task_complete - finish up the service task
1394  * @pf: board private structure
1395  */
1396 static void ice_service_task_complete(struct ice_pf *pf)
1397 {
1398 	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
1399 
1400 	/* force memory (pf->state) to sync before next service task */
1401 	smp_mb__before_atomic();
1402 	clear_bit(__ICE_SERVICE_SCHED, pf->state);
1403 }
1404 
1405 /**
1406  * ice_service_task_stop - stop service task and cancel works
1407  * @pf: board private structure
1408  *
1409  * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
1410  * 1 otherwise.
1411  */
1412 static int ice_service_task_stop(struct ice_pf *pf)
1413 {
1414 	int ret;
1415 
1416 	ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);
1417 
1418 	if (pf->serv_tmr.function)
1419 		del_timer_sync(&pf->serv_tmr);
1420 	if (pf->serv_task.func)
1421 		cancel_work_sync(&pf->serv_task);
1422 
1423 	clear_bit(__ICE_SERVICE_SCHED, pf->state);
1424 	return ret;
1425 }
1426 
1427 /**
1428  * ice_service_task_restart - restart service task and schedule works
1429  * @pf: board private structure
1430  *
1431  * This function is needed for the suspend and resume paths (e.g. the WoL scenario)
1432  */
1433 static void ice_service_task_restart(struct ice_pf *pf)
1434 {
1435 	clear_bit(__ICE_SERVICE_DIS, pf->state);
1436 	ice_service_task_schedule(pf);
1437 }
1438 
1439 /**
1440  * ice_service_timer - timer callback to schedule service task
1441  * @t: pointer to timer_list
1442  */
1443 static void ice_service_timer(struct timer_list *t)
1444 {
1445 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1446 
1447 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1448 	ice_service_task_schedule(pf);
1449 }
1450 
1451 /**
1452  * ice_handle_mdd_event - handle malicious driver detect event
1453  * @pf: pointer to the PF structure
1454  *
1455  * Called from service task. OICR interrupt handler indicates MDD event.
1456  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1457  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1458  * disable the queue, the PF can be configured to reset the VF using ethtool
1459  * private flag mdd-auto-reset-vf.
1460  */
1461 static void ice_handle_mdd_event(struct ice_pf *pf)
1462 {
1463 	struct device *dev = ice_pf_to_dev(pf);
1464 	struct ice_hw *hw = &pf->hw;
1465 	unsigned int i;
1466 	u32 reg;
1467 
1468 	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
1469 		/* Since the VF MDD event logging is rate limited, check if
1470 		 * there are pending MDD events.
1471 		 */
1472 		ice_print_vfs_mdd_events(pf);
1473 		return;
1474 	}
1475 
1476 	/* find what triggered an MDD event */
1477 	reg = rd32(hw, GL_MDET_TX_PQM);
1478 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1479 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1480 				GL_MDET_TX_PQM_PF_NUM_S;
1481 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1482 				GL_MDET_TX_PQM_VF_NUM_S;
1483 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1484 				GL_MDET_TX_PQM_MAL_TYPE_S;
1485 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1486 				GL_MDET_TX_PQM_QNUM_S);
1487 
1488 		if (netif_msg_tx_err(pf))
1489 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1490 				 event, queue, pf_num, vf_num);
1491 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1492 	}
1493 
1494 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1495 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1496 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1497 				GL_MDET_TX_TCLAN_PF_NUM_S;
1498 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1499 				GL_MDET_TX_TCLAN_VF_NUM_S;
1500 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1501 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1502 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1503 				GL_MDET_TX_TCLAN_QNUM_S);
1504 
1505 		if (netif_msg_tx_err(pf))
1506 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1507 				 event, queue, pf_num, vf_num);
1508 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1509 	}
1510 
1511 	reg = rd32(hw, GL_MDET_RX);
1512 	if (reg & GL_MDET_RX_VALID_M) {
1513 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1514 				GL_MDET_RX_PF_NUM_S;
1515 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1516 				GL_MDET_RX_VF_NUM_S;
1517 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1518 				GL_MDET_RX_MAL_TYPE_S;
1519 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1520 				GL_MDET_RX_QNUM_S);
1521 
1522 		if (netif_msg_rx_err(pf))
1523 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1524 				 event, queue, pf_num, vf_num);
1525 		wr32(hw, GL_MDET_RX, 0xffffffff);
1526 	}
1527 
1528 	/* check to see if this PF caused an MDD event */
1529 	reg = rd32(hw, PF_MDET_TX_PQM);
1530 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1531 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1532 		if (netif_msg_tx_err(pf))
1533 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1534 	}
1535 
1536 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1537 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1538 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1539 		if (netif_msg_tx_err(pf))
1540 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1541 	}
1542 
1543 	reg = rd32(hw, PF_MDET_RX);
1544 	if (reg & PF_MDET_RX_VALID_M) {
1545 		wr32(hw, PF_MDET_RX, 0xFFFF);
1546 		if (netif_msg_rx_err(pf))
1547 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1548 	}
1549 
1550 	/* Check to see if one of the VFs caused an MDD event, and then
1551 	 * increment counters and set print pending
1552 	 */
1553 	ice_for_each_vf(pf, i) {
1554 		struct ice_vf *vf = &pf->vf[i];
1555 
1556 		reg = rd32(hw, VP_MDET_TX_PQM(i));
1557 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1558 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1559 			vf->mdd_tx_events.count++;
1560 			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1561 			if (netif_msg_tx_err(pf))
1562 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1563 					 i);
1564 		}
1565 
1566 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1567 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1568 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1569 			vf->mdd_tx_events.count++;
1570 			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1571 			if (netif_msg_tx_err(pf))
1572 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1573 					 i);
1574 		}
1575 
1576 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1577 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1578 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1579 			vf->mdd_tx_events.count++;
1580 			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1581 			if (netif_msg_tx_err(pf))
1582 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1583 					 i);
1584 		}
1585 
1586 		reg = rd32(hw, VP_MDET_RX(i));
1587 		if (reg & VP_MDET_RX_VALID_M) {
1588 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1589 			vf->mdd_rx_events.count++;
1590 			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1591 			if (netif_msg_rx_err(pf))
1592 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1593 					 i);
1594 
1595 			/* Since the queue is disabled on VF Rx MDD events, the
1596 			 * PF can be configured to reset the VF through ethtool
1597 			 * private flag mdd-auto-reset-vf.
1598 			 */
1599 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1600 				/* VF MDD event counters will be cleared by
1601 				 * reset, so print the event prior to reset.
1602 				 */
1603 				ice_print_vf_rx_mdd_event(vf);
1604 				ice_reset_vf(&pf->vf[i], false);
1605 			}
1606 		}
1607 	}
1608 
1609 	ice_print_vfs_mdd_events(pf);
1610 }
1611 
1612 /**
1613  * ice_force_phys_link_state - Force the physical link state
1614  * @vsi: VSI to force the physical link state to up/down
1615  * @link_up: true/false indicates to set the physical link to up/down
1616  *
1617  * Force the physical link state by getting the current PHY capabilities from
1618  * hardware and setting the PHY config based on the determined capabilities. If
1619  * link changes a link event will be triggered because both the Enable Automatic
1620  * the link changes, a link event will be triggered because both the Enable Automatic
1621  *
1622  * Returns 0 on success, negative on failure
1623  */
1624 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1625 {
1626 	struct ice_aqc_get_phy_caps_data *pcaps;
1627 	struct ice_aqc_set_phy_cfg_data *cfg;
1628 	struct ice_port_info *pi;
1629 	struct device *dev;
1630 	int retcode;
1631 
1632 	if (!vsi || !vsi->port_info || !vsi->back)
1633 		return -EINVAL;
1634 	if (vsi->type != ICE_VSI_PF)
1635 		return 0;
1636 
1637 	dev = ice_pf_to_dev(vsi->back);
1638 
1639 	pi = vsi->port_info;
1640 
1641 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1642 	if (!pcaps)
1643 		return -ENOMEM;
1644 
1645 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1646 				      NULL);
1647 	if (retcode) {
1648 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1649 			vsi->vsi_num, retcode);
1650 		retcode = -EIO;
1651 		goto out;
1652 	}
1653 
1654 	/* No change in link */
1655 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1656 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1657 		goto out;
1658 
1659 	/* Use the current user PHY configuration. The current user PHY
1660 	 * configuration is initialized during probe from PHY capabilities
1661 	 * software mode, and updated on set PHY configuration.
1662 	 */
1663 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1664 	if (!cfg) {
1665 		retcode = -ENOMEM;
1666 		goto out;
1667 	}
1668 
1669 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1670 	if (link_up)
1671 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1672 	else
1673 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1674 
1675 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1676 	if (retcode) {
1677 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1678 			vsi->vsi_num, retcode);
1679 		retcode = -EIO;
1680 	}
1681 
1682 	kfree(cfg);
1683 out:
1684 	kfree(pcaps);
1685 	return retcode;
1686 }
1687 
1688 /**
1689  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1690  * @pi: port info structure
1691  *
1692  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1693  */
1694 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1695 {
1696 	struct ice_aqc_get_phy_caps_data *pcaps;
1697 	struct ice_pf *pf = pi->hw->back;
1698 	enum ice_status status;
1699 	int err = 0;
1700 
1701 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1702 	if (!pcaps)
1703 		return -ENOMEM;
1704 
1705 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
1706 				     NULL);
1707 
1708 	if (status) {
1709 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1710 		err = -EIO;
1711 		goto out;
1712 	}
1713 
1714 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1715 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1716 
1717 out:
1718 	kfree(pcaps);
1719 	return err;
1720 }
1721 
1722 /**
1723  * ice_init_link_dflt_override - Initialize link default override
1724  * @pi: port info structure
1725  *
1726  * Initialize link default override and PHY total port shutdown during probe
1727  */
1728 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1729 {
1730 	struct ice_link_default_override_tlv *ldo;
1731 	struct ice_pf *pf = pi->hw->back;
1732 
1733 	ldo = &pf->link_dflt_override;
1734 	if (ice_get_link_default_override(ldo, pi))
1735 		return;
1736 
1737 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1738 		return;
1739 
1740 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1741 	 * ethtool private flag) for ports with Port Disable bit set.
1742 	 */
1743 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1744 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1745 }
1746 
1747 /**
1748  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1749  * @pi: port info structure
1750  *
1751  * If default override is enabled, initialize the user PHY cfg speed and FEC
1752  * settings using the default override mask from the NVM.
1753  *
1754  * The PHY should only be configured with the default override settings the
1755  * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1756  * is used to indicate that the user PHY cfg default override is initialized
1757  * and the PHY has not been configured with the default override settings. The
1758  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1759  * configured.
1760  */
1761 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1762 {
1763 	struct ice_link_default_override_tlv *ldo;
1764 	struct ice_aqc_set_phy_cfg_data *cfg;
1765 	struct ice_phy_info *phy = &pi->phy;
1766 	struct ice_pf *pf = pi->hw->back;
1767 
1768 	ldo = &pf->link_dflt_override;
1769 
1770 	/* If link default override is enabled, use it to mask NVM PHY capabilities
1771 	 * for speed and FEC default configuration.
1772 	 */
1773 	cfg = &phy->curr_user_phy_cfg;
1774 
1775 	if (ldo->phy_type_low || ldo->phy_type_high) {
1776 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1777 				    cpu_to_le64(ldo->phy_type_low);
1778 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1779 				     cpu_to_le64(ldo->phy_type_high);
1780 	}
1781 	cfg->link_fec_opt = ldo->fec_options;
1782 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1783 
1784 	set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1785 }
1786 
1787 /**
1788  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1789  * @pi: port info structure
1790  *
1791  * Initialize the current user PHY configuration and the requested speed, FEC,
1792  * and FC modes to their defaults. The defaults come from get PHY capabilities
1793  * for topology with media, so call this only once media is available; an
1794  * error is returned otherwise. The PHY initialization completed state is
1795  * set here.
1796  *
1797  * These values are used when setting the PHY
1798  * configuration, and the user PHY configuration is refreshed on each set PHY
1799  * configuration. Returns 0 on success, negative on failure.
1800  */
1801 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1802 {
1803 	struct ice_aqc_get_phy_caps_data *pcaps;
1804 	struct ice_phy_info *phy = &pi->phy;
1805 	struct ice_pf *pf = pi->hw->back;
1806 	enum ice_status status;
1807 	struct ice_vsi *vsi;
1808 	int err = 0;
1809 
1810 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1811 		return -EIO;
1812 
1813 	vsi = ice_get_main_vsi(pf);
1814 	if (!vsi)
1815 		return -EINVAL;
1816 
1817 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1818 	if (!pcaps)
1819 		return -ENOMEM;
1820 
1821 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
1822 				     NULL);
1823 	if (status) {
1824 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1825 		err = -EIO;
1826 		goto err_out;
1827 	}
1828 
1829 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1830 
1831 	/* check if lenient mode is supported and enabled */
1832 	if (ice_fw_supports_link_override(&vsi->back->hw) &&
1833 	    !(pcaps->module_compliance_enforcement &
1834 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1835 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1836 
1837 		/* if link default override is enabled, initialize user PHY
1838 		 * configuration with link default override values
1839 		 */
1840 		if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
1841 			ice_init_phy_cfg_dflt_override(pi);
1842 			goto out;
1843 		}
1844 	}
1845 
1846 	/* if link default override is not enabled, initialize PHY using
1847 	 * topology with media
1848 	 */
1849 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1850 						      pcaps->link_fec_options);
1851 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1852 
1853 out:
1854 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1855 	set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
1856 err_out:
1857 	kfree(pcaps);
1858 	return err;
1859 }
1860 
1861 /**
1862  * ice_configure_phy - configure PHY
1863  * @vsi: VSI of PHY
1864  *
1865  * Set the PHY configuration. If the current PHY configuration is the same as
1866  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1867  * configure the PHY based on get PHY capabilities for topology with media.
1868  */
1869 static int ice_configure_phy(struct ice_vsi *vsi)
1870 {
1871 	struct device *dev = ice_pf_to_dev(vsi->back);
1872 	struct ice_aqc_get_phy_caps_data *pcaps;
1873 	struct ice_aqc_set_phy_cfg_data *cfg;
1874 	struct ice_port_info *pi;
1875 	enum ice_status status;
1876 	int err = 0;
1877 
1878 	pi = vsi->port_info;
1879 	if (!pi)
1880 		return -EINVAL;
1881 
1882 	/* Ensure we have media as we cannot configure a medialess port */
1883 	if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1884 		return -EPERM;
1885 
1886 	ice_print_topo_conflict(vsi);
1887 
1888 	if (vsi->port_info->phy.link_info.topo_media_conflict ==
1889 	    ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1890 		return -EPERM;
1891 
1892 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
1893 		return ice_force_phys_link_state(vsi, true);
1894 
1895 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1896 	if (!pcaps)
1897 		return -ENOMEM;
1898 
1899 	/* Get current PHY config */
1900 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1901 				     NULL);
1902 	if (status) {
1903 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1904 			vsi->vsi_num, ice_stat_str(status));
1905 		err = -EIO;
1906 		goto done;
1907 	}
1908 
1909 	/* If PHY enable link is configured and configuration has not changed,
1910 	 * there's nothing to do
1911 	 */
1912 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1913 	    ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
1914 		goto done;
1915 
1916 	/* Use PHY topology as baseline for configuration */
1917 	memset(pcaps, 0, sizeof(*pcaps));
1918 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
1919 				     NULL);
1920 	if (status) {
1921 		dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
1922 			vsi->vsi_num, ice_stat_str(status));
1923 		err = -EIO;
1924 		goto done;
1925 	}
1926 
1927 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1928 	if (!cfg) {
1929 		err = -ENOMEM;
1930 		goto done;
1931 	}
1932 
1933 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
1934 
1935 	/* Speed - If default override pending, use curr_user_phy_cfg set in
1936 	 * ice_init_phy_cfg_dflt_override.
1937 	 */
1938 	if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
1939 			       vsi->back->state)) {
1940 		cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
1941 		cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
1942 	} else {
1943 		u64 phy_low = 0, phy_high = 0;
1944 
1945 		ice_update_phy_type(&phy_low, &phy_high,
1946 				    pi->phy.curr_user_speed_req);
1947 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
1948 		cfg->phy_type_high = pcaps->phy_type_high &
1949 				     cpu_to_le64(phy_high);
1950 	}
1951 
1952 	/* Can't provide what was requested; use PHY capabilities */
1953 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
1954 		cfg->phy_type_low = pcaps->phy_type_low;
1955 		cfg->phy_type_high = pcaps->phy_type_high;
1956 	}
1957 
1958 	/* FEC */
1959 	ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
1960 
1961 	/* Can't provide what was requested; use PHY capabilities */
1962 	if (cfg->link_fec_opt !=
1963 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
1964 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
1965 		cfg->link_fec_opt = pcaps->link_fec_options;
1966 	}
1967 
1968 	/* Flow Control - always supported; no need to check against
1969 	 * capabilities
1970 	 */
1971 	ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
1972 
1973 	/* Enable link and link update */
1974 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
1975 
1976 	status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1977 	if (status) {
1978 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
1979 			vsi->vsi_num, ice_stat_str(status));
1980 		err = -EIO;
1981 	}
1982 
1983 	kfree(cfg);
1984 done:
1985 	kfree(pcaps);
1986 	return err;
1987 }
1988 
1989 /**
1990  * ice_check_media_subtask - Check for media
1991  * @pf: pointer to PF struct
1992  *
1993  * If media is available, then initialize the PHY user configuration if it
1994  * has not been done yet, and configure the PHY if the interface is up.
1995  */
1996 static void ice_check_media_subtask(struct ice_pf *pf)
1997 {
1998 	struct ice_port_info *pi;
1999 	struct ice_vsi *vsi;
2000 	int err;
2001 
2002 	/* No need to check for media if it's already present */
2003 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2004 		return;
2005 
2006 	vsi = ice_get_main_vsi(pf);
2007 	if (!vsi)
2008 		return;
2009 
2010 	/* Refresh link info and check if media is present */
2011 	pi = vsi->port_info;
2012 	err = ice_update_link_info(pi);
2013 	if (err)
2014 		return;
2015 
2016 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2017 		if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
2018 			ice_init_phy_user_cfg(pi);
2019 
2020 		/* PHY settings are reset on media insertion, reconfigure
2021 		 * PHY to preserve settings.
2022 		 */
2023 		if (test_bit(__ICE_DOWN, vsi->state) &&
2024 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2025 			return;
2026 
2027 		err = ice_configure_phy(vsi);
2028 		if (!err)
2029 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2030 
2031 		/* A Link Status Event will be generated; the event handler
2032 		 * will complete bringing the interface up
2033 		 */
2034 	}
2035 }
2036 
2037 /**
2038  * ice_service_task - manage and run subtasks
2039  * @work: pointer to work_struct contained by the PF struct
2040  */
2041 static void ice_service_task(struct work_struct *work)
2042 {
2043 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2044 	unsigned long start_time = jiffies;
2045 
2046 	/* subtasks */
2047 
2048 	/* process reset requests first */
2049 	ice_reset_subtask(pf);
2050 
2051 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2052 	if (ice_is_reset_in_progress(pf->state) ||
2053 	    test_bit(__ICE_SUSPENDED, pf->state) ||
2054 	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
2055 		ice_service_task_complete(pf);
2056 		return;
2057 	}
2058 
2059 	ice_clean_adminq_subtask(pf);
2060 	ice_check_media_subtask(pf);
2061 	ice_check_for_hang_subtask(pf);
2062 	ice_sync_fltr_subtask(pf);
2063 	ice_handle_mdd_event(pf);
2064 	ice_watchdog_subtask(pf);
2065 
2066 	if (ice_is_safe_mode(pf)) {
2067 		ice_service_task_complete(pf);
2068 		return;
2069 	}
2070 
2071 	ice_process_vflr_event(pf);
2072 	ice_clean_mailboxq_subtask(pf);
2073 	ice_sync_arfs_fltrs(pf);
2074 	ice_flush_fdir_ctx(pf);
2075 	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
2076 	ice_service_task_complete(pf);
2077 
2078 	/* If the tasks have taken longer than one service timer period
2079 	 * or there is more work to be done, reset the service timer to
2080 	 * schedule the service task now.
2081 	 */
2082 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2083 	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
2084 	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
2085 	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2086 	    test_bit(__ICE_FD_VF_FLUSH_CTX, pf->state) ||
2087 	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
2088 		mod_timer(&pf->serv_tmr, jiffies);
2089 }
2090 
2091 /**
2092  * ice_set_ctrlq_len - helper function to set controlq length
2093  * @hw: pointer to the HW instance
2094  */
2095 static void ice_set_ctrlq_len(struct ice_hw *hw)
2096 {
2097 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2098 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2099 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2100 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
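	/* Size the mailbox receive queue using the ARQLEN register's length
	 * field mask, i.e. the largest length the hardware accepts, so bursts
	 * of VF mailbox messages are less likely to overflow the queue.
	 */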
2101 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2102 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2103 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2104 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2105 }
2106 
2107 /**
2108  * ice_schedule_reset - schedule a reset
2109  * @pf: board private structure
2110  * @reset: reset being requested
2111  */
2112 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2113 {
2114 	struct device *dev = ice_pf_to_dev(pf);
2115 
2116 	/* bail out if earlier reset has failed */
2117 	if (test_bit(__ICE_RESET_FAILED, pf->state)) {
2118 		dev_dbg(dev, "earlier reset has failed\n");
2119 		return -EIO;
2120 	}
2121 	/* bail if reset/recovery already in progress */
2122 	if (ice_is_reset_in_progress(pf->state)) {
2123 		dev_dbg(dev, "Reset already in progress\n");
2124 		return -EBUSY;
2125 	}
2126 
2127 	switch (reset) {
2128 	case ICE_RESET_PFR:
2129 		set_bit(__ICE_PFR_REQ, pf->state);
2130 		break;
2131 	case ICE_RESET_CORER:
2132 		set_bit(__ICE_CORER_REQ, pf->state);
2133 		break;
2134 	case ICE_RESET_GLOBR:
2135 		set_bit(__ICE_GLOBR_REQ, pf->state);
2136 		break;
2137 	default:
2138 		return -EINVAL;
2139 	}
2140 
2141 	ice_service_task_schedule(pf);
2142 	return 0;
2143 }
2144 
2145 /**
2146  * ice_irq_affinity_notify - Callback for affinity changes
2147  * @notify: context as to what irq was changed
2148  * @mask: the new affinity mask
2149  *
2150  * This is a callback function used by the irq_set_affinity_notifier function
2151  * so that we may register to receive changes to the irq affinity masks.
2152  */
2153 static void
2154 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2155 			const cpumask_t *mask)
2156 {
2157 	struct ice_q_vector *q_vector =
2158 		container_of(notify, struct ice_q_vector, affinity_notify);
2159 
2160 	cpumask_copy(&q_vector->affinity_mask, mask);
2161 }
2162 
2163 /**
2164  * ice_irq_affinity_release - Callback for affinity notifier release
2165  * @ref: internal core kernel usage
2166  *
2167  * This is a callback function used by the irq_set_affinity_notifier function
2168  * to inform the current notification subscriber that they will no longer
2169  * receive notifications.
2170  */
2171 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2172 
2173 /**
2174  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2175  * @vsi: the VSI being configured
2176  */
2177 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2178 {
2179 	struct ice_hw *hw = &vsi->back->hw;
2180 	int i;
2181 
2182 	ice_for_each_q_vector(vsi, i)
2183 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2184 
2185 	ice_flush(hw);
2186 	return 0;
2187 }
2188 
2189 /**
2190  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2191  * @vsi: the VSI being configured
2192  * @basename: name for the vector
2193  */
2194 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2195 {
2196 	int q_vectors = vsi->num_q_vectors;
2197 	struct ice_pf *pf = vsi->back;
2198 	int base = vsi->base_vector;
2199 	struct device *dev;
2200 	int rx_int_idx = 0;
2201 	int tx_int_idx = 0;
2202 	int vector, err;
2203 	int irq_num;
2204 
2205 	dev = ice_pf_to_dev(pf);
2206 	for (vector = 0; vector < q_vectors; vector++) {
2207 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2208 
2209 		irq_num = pf->msix_entries[base + vector].vector;
2210 
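		/* Name the vector after the ring types it services. A combined
		 * TxRx vector consumes both an Rx and a Tx index so the
		 * numbering stays in step with single-type vectors.
		 */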
2211 		if (q_vector->tx.ring && q_vector->rx.ring) {
2212 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2213 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2214 			tx_int_idx++;
2215 		} else if (q_vector->rx.ring) {
2216 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2217 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2218 		} else if (q_vector->tx.ring) {
2219 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2220 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2221 		} else {
2222 			/* skip this unused q_vector */
2223 			continue;
2224 		}
2225 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2226 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2227 					       IRQF_SHARED, q_vector->name,
2228 					       q_vector);
2229 		else
2230 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2231 					       0, q_vector->name, q_vector);
2232 		if (err) {
2233 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2234 				   err);
2235 			goto free_q_irqs;
2236 		}
2237 
2238 		/* register for affinity change notifications */
2239 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2240 			struct irq_affinity_notify *affinity_notify;
2241 
2242 			affinity_notify = &q_vector->affinity_notify;
2243 			affinity_notify->notify = ice_irq_affinity_notify;
2244 			affinity_notify->release = ice_irq_affinity_release;
2245 			irq_set_affinity_notifier(irq_num, affinity_notify);
2246 		}
2247 
2248 		/* assign the mask for this irq */
2249 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2250 	}
2251 
2252 	vsi->irqs_ready = true;
2253 	return 0;
2254 
2255 free_q_irqs:
2256 	while (vector) {
2257 		vector--;
2258 		irq_num = pf->msix_entries[base + vector].vector;
2259 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2260 			irq_set_affinity_notifier(irq_num, NULL);
2261 		irq_set_affinity_hint(irq_num, NULL);
2262 		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2263 	}
2264 	return err;
2265 }
2266 
2267 /**
2268  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2269  * @vsi: VSI to setup Tx rings used by XDP
2270  *
2271  * Return 0 on success and negative value on error
2272  */
2273 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2274 {
2275 	struct device *dev = ice_pf_to_dev(vsi->back);
2276 	int i;
2277 
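	/* XDP rings are appended after the regular Tx queues, so XDP ring i
	 * occupies slot vsi->alloc_txq + i in the VSI's Tx queue map.
	 */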
2278 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2279 		u16 xdp_q_idx = vsi->alloc_txq + i;
2280 		struct ice_ring *xdp_ring;
2281 
2282 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2283 
2284 		if (!xdp_ring)
2285 			goto free_xdp_rings;
2286 
2287 		xdp_ring->q_index = xdp_q_idx;
2288 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2289 		xdp_ring->ring_active = false;
2290 		xdp_ring->vsi = vsi;
2291 		xdp_ring->netdev = NULL;
2292 		xdp_ring->dev = dev;
2293 		xdp_ring->count = vsi->num_tx_desc;
2294 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2295 		if (ice_setup_tx_ring(xdp_ring))
2296 			goto free_xdp_rings;
2297 		ice_set_ring_xdp(xdp_ring);
2298 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2299 	}
2300 
2301 	return 0;
2302 
2303 free_xdp_rings:
2304 	for (; i >= 0; i--)
2305 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2306 			ice_free_tx_ring(vsi->xdp_rings[i]);
2307 	return -ENOMEM;
2308 }
2309 
2310 /**
2311  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2312  * @vsi: VSI to set the bpf prog on
2313  * @prog: the bpf prog pointer
2314  */
2315 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2316 {
2317 	struct bpf_prog *old_prog;
2318 	int i;
2319 
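	/* Atomically publish the new program; readers on the Rx hot path see
	 * either the old or the new pointer, never a torn value.
	 */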
2320 	old_prog = xchg(&vsi->xdp_prog, prog);
2321 	if (old_prog)
2322 		bpf_prog_put(old_prog);
2323 
2324 	ice_for_each_rxq(vsi, i)
2325 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2326 }
2327 
2328 /**
2329  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2330  * @vsi: VSI to bring up Tx rings used by XDP
2331  * @prog: bpf program that will be assigned to VSI
2332  *
2333  * Return 0 on success and negative value on error
2334  */
2335 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2336 {
2337 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2338 	int xdp_rings_rem = vsi->num_xdp_txq;
2339 	struct ice_pf *pf = vsi->back;
2340 	struct ice_qs_cfg xdp_qs_cfg = {
2341 		.qs_mutex = &pf->avail_q_mutex,
2342 		.pf_map = pf->avail_txqs,
2343 		.pf_map_size = pf->max_pf_txqs,
2344 		.q_count = vsi->num_xdp_txq,
2345 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2346 		.vsi_map = vsi->txq_map,
2347 		.vsi_map_offset = vsi->alloc_txq,
2348 		.mapping_mode = ICE_VSI_MAP_CONTIG
2349 	};
2350 	enum ice_status status;
2351 	struct device *dev;
2352 	int i, v_idx;
2353 
2354 	dev = ice_pf_to_dev(pf);
2355 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2356 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2357 	if (!vsi->xdp_rings)
2358 		return -ENOMEM;
2359 
2360 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2361 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2362 		goto err_map_xdp;
2363 
2364 	if (ice_xdp_alloc_setup_rings(vsi))
2365 		goto clear_xdp_rings;
2366 
2367 	/* follow the logic from ice_vsi_map_rings_to_vectors */
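	/* e.g. 4 XDP rings over 3 vectors assigns 2, 1 and 1 rings:
	 * DIV_ROUND_UP(4, 3) = 2, then DIV_ROUND_UP(2, 2) = 1 and
	 * DIV_ROUND_UP(1, 1) = 1.
	 */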
2368 	ice_for_each_q_vector(vsi, v_idx) {
2369 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2370 		int xdp_rings_per_v, q_id, q_base;
2371 
2372 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2373 					       vsi->num_q_vectors - v_idx);
2374 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2375 
2376 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2377 			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2378 
2379 			xdp_ring->q_vector = q_vector;
2380 			xdp_ring->next = q_vector->tx.ring;
2381 			q_vector->tx.ring = xdp_ring;
2382 		}
2383 		xdp_rings_rem -= xdp_rings_per_v;
2384 	}
2385 
2386 	/* omit the scheduler update if in reset path; XDP queues will be
2387 	 * taken into account at the end of ice_vsi_rebuild, where
2388 	 * ice_cfg_vsi_lan is being called
2389 	 */
2390 	if (ice_is_reset_in_progress(pf->state))
2391 		return 0;
2392 
2393 	/* tell the Tx scheduler that right now we have
2394 	 * additional queues
2395 	 */
2396 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2397 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2398 
2399 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2400 				 max_txqs);
2401 	if (status) {
2402 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2403 			ice_stat_str(status));
2404 		goto clear_xdp_rings;
2405 	}
2406 	ice_vsi_assign_bpf_prog(vsi, prog);
2407 
2408 	return 0;
2409 clear_xdp_rings:
2410 	for (i = 0; i < vsi->num_xdp_txq; i++)
2411 		if (vsi->xdp_rings[i]) {
2412 			kfree_rcu(vsi->xdp_rings[i], rcu);
2413 			vsi->xdp_rings[i] = NULL;
2414 		}
2415 
2416 err_map_xdp:
2417 	mutex_lock(&pf->avail_q_mutex);
2418 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2419 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2420 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2421 	}
2422 	mutex_unlock(&pf->avail_q_mutex);
2423 
2424 	devm_kfree(dev, vsi->xdp_rings);
2425 	return -ENOMEM;
2426 }
2427 
2428 /**
2429  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2430  * @vsi: VSI to remove XDP rings
2431  *
2432  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2433  * resources
2434  */
2435 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2436 {
2437 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2438 	struct ice_pf *pf = vsi->back;
2439 	int i, v_idx;
2440 
2441 	/* q_vectors are freed in the reset path, so there's no point in detaching
2442 	 * rings. If the rebuild was triggered by something other than a reset,
2443 	 * the reset bits in pf->state won't be set, so additionally check the
2444 	 * first q_vector against NULL.
2445 	 */
2446 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2447 		goto free_qmap;
2448 
2449 	ice_for_each_q_vector(vsi, v_idx) {
2450 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2451 		struct ice_ring *ring;
2452 
2453 		ice_for_each_ring(ring, q_vector->tx)
2454 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2455 				break;
2456 
2457 		/* restore the value of last node prior to XDP setup */
2458 		q_vector->tx.ring = ring;
2459 	}
2460 
2461 free_qmap:
2462 	mutex_lock(&pf->avail_q_mutex);
2463 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2464 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2465 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2466 	}
2467 	mutex_unlock(&pf->avail_q_mutex);
2468 
2469 	for (i = 0; i < vsi->num_xdp_txq; i++)
2470 		if (vsi->xdp_rings[i]) {
2471 			if (vsi->xdp_rings[i]->desc)
2472 				ice_free_tx_ring(vsi->xdp_rings[i]);
2473 			kfree_rcu(vsi->xdp_rings[i], rcu);
2474 			vsi->xdp_rings[i] = NULL;
2475 		}
2476 
2477 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2478 	vsi->xdp_rings = NULL;
2479 
2480 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2481 		return 0;
2482 
2483 	ice_vsi_assign_bpf_prog(vsi, NULL);
2484 
2485 	/* notify Tx scheduler that we destroyed XDP queues and bring
2486 	 * back the old number of child nodes
2487 	 */
2488 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2489 		max_txqs[i] = vsi->num_txq;
2490 
2491 	/* change number of XDP Tx queues to 0 */
2492 	vsi->num_xdp_txq = 0;
2493 
2494 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2495 			       max_txqs);
2496 }
2497 
2498 /**
2499  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2500  * @vsi: VSI to schedule napi on
2501  */
2502 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2503 {
2504 	int i;
2505 
2506 	ice_for_each_rxq(vsi, i) {
2507 		struct ice_ring *rx_ring = vsi->rx_rings[i];
2508 
2509 		if (rx_ring->xsk_pool)
2510 			napi_schedule(&rx_ring->q_vector->napi);
2511 	}
2512 }
2513 
2514 /**
2515  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2516  * @vsi: VSI to setup XDP for
2517  * @prog: XDP program
2518  * @extack: netlink extended ack
2519  */
2520 static int
2521 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2522 		   struct netlink_ext_ack *extack)
2523 {
2524 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2525 	bool if_running = netif_running(vsi->netdev);
2526 	int ret = 0, xdp_ring_err = 0;
2527 
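	/* The full frame (MTU plus L2 header padding) must fit in a single Rx
	 * buffer; multi-buffer XDP is not supported here.
	 */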
2528 	if (frame_size > vsi->rx_buf_len) {
2529 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2530 		return -EOPNOTSUPP;
2531 	}
2532 
2533 	/* need to stop netdev while setting up the program for Rx rings */
2534 	if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
2535 		ret = ice_down(vsi);
2536 		if (ret) {
2537 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2538 			return ret;
2539 		}
2540 	}
2541 
2542 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2543 		vsi->num_xdp_txq = vsi->alloc_rxq;
2544 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2545 		if (xdp_ring_err)
2546 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2547 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2548 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2549 		if (xdp_ring_err)
2550 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2551 	} else {
2552 		ice_vsi_assign_bpf_prog(vsi, prog);
2553 	}
2554 
2555 	if (if_running)
2556 		ret = ice_up(vsi);
2557 
2558 	if (!ret && prog)
2559 		ice_vsi_rx_napi_schedule(vsi);
2560 
2561 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2562 }
2563 
2564 /**
2565  * ice_xdp - implements XDP handler
2566  * @dev: netdevice
2567  * @xdp: XDP command
2568  */
2569 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2570 {
2571 	struct ice_netdev_priv *np = netdev_priv(dev);
2572 	struct ice_vsi *vsi = np->vsi;
2573 
2574 	if (vsi->type != ICE_VSI_PF) {
2575 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2576 		return -EINVAL;
2577 	}
2578 
2579 	switch (xdp->command) {
2580 	case XDP_SETUP_PROG:
2581 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2582 	case XDP_SETUP_XSK_POOL:
2583 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2584 					  xdp->xsk.queue_id);
2585 	default:
2586 		return -EINVAL;
2587 	}
2588 }
2589 
2590 /**
2591  * ice_ena_misc_vector - enable the non-queue interrupts
2592  * @pf: board private structure
2593  */
2594 static void ice_ena_misc_vector(struct ice_pf *pf)
2595 {
2596 	struct ice_hw *hw = &pf->hw;
2597 	u32 val;
2598 
2599 	/* Disable anti-spoof detection interrupt to prevent spurious event
2600 	 * interrupts during a function reset. Anti-spoof functionality is
2601 	 * still supported.
2602 	 */
2603 	val = rd32(hw, GL_MDCK_TX_TDPU);
2604 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2605 	wr32(hw, GL_MDCK_TX_TDPU, val);
2606 
2607 	/* clear things first */
2608 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2609 	rd32(hw, PFINT_OICR);		/* read to clear */
2610 
2611 	val = (PFINT_OICR_ECC_ERR_M |
2612 	       PFINT_OICR_MAL_DETECT_M |
2613 	       PFINT_OICR_GRST_M |
2614 	       PFINT_OICR_PCI_EXCEPTION_M |
2615 	       PFINT_OICR_VFLR_M |
2616 	       PFINT_OICR_HMC_ERR_M |
2617 	       PFINT_OICR_PE_CRITERR_M);
2618 
2619 	wr32(hw, PFINT_OICR_ENA, val);
2620 
2621 	/* SW_ITR_IDX = 0, but don't change INTENA */
2622 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2623 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2624 }
2625 
2626 /**
2627  * ice_misc_intr - misc interrupt handler
2628  * @irq: interrupt number
2629  * @data: pointer to a q_vector
2630  */
2631 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2632 {
2633 	struct ice_pf *pf = (struct ice_pf *)data;
2634 	struct ice_hw *hw = &pf->hw;
2635 	irqreturn_t ret = IRQ_NONE;
2636 	struct device *dev;
2637 	u32 oicr, ena_mask;
2638 
2639 	dev = ice_pf_to_dev(pf);
2640 	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
2641 	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2642 
2643 	oicr = rd32(hw, PFINT_OICR);
2644 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2645 
2646 	if (oicr & PFINT_OICR_SWINT_M) {
2647 		ena_mask &= ~PFINT_OICR_SWINT_M;
2648 		pf->sw_int_count++;
2649 	}
2650 
2651 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2652 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2653 		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
2654 	}
2655 	if (oicr & PFINT_OICR_VFLR_M) {
2656 		/* disable any further VFLR event notifications */
2657 		if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
2658 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2659 
2660 			reg &= ~PFINT_OICR_VFLR_M;
2661 			wr32(hw, PFINT_OICR_ENA, reg);
2662 		} else {
2663 			ena_mask &= ~PFINT_OICR_VFLR_M;
2664 			set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
2665 		}
2666 	}
2667 
2668 	if (oicr & PFINT_OICR_GRST_M) {
2669 		u32 reset;
2670 
2671 		/* we have a reset warning */
2672 		ena_mask &= ~PFINT_OICR_GRST_M;
2673 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2674 			GLGEN_RSTAT_RESET_TYPE_S;
2675 
2676 		if (reset == ICE_RESET_CORER)
2677 			pf->corer_count++;
2678 		else if (reset == ICE_RESET_GLOBR)
2679 			pf->globr_count++;
2680 		else if (reset == ICE_RESET_EMPR)
2681 			pf->empr_count++;
2682 		else
2683 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2684 
2685 		/* If a reset cycle isn't already in progress, we set a bit in
2686 		 * pf->state so that the service task can start a reset/rebuild.
2687 		 * We also make note of which reset happened so that peer
2688 		 * devices/drivers can be informed.
2689 		 */
2690 		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
2691 			if (reset == ICE_RESET_CORER)
2692 				set_bit(__ICE_CORER_RECV, pf->state);
2693 			else if (reset == ICE_RESET_GLOBR)
2694 				set_bit(__ICE_GLOBR_RECV, pf->state);
2695 			else
2696 				set_bit(__ICE_EMPR_RECV, pf->state);
2697 
2698 			/* There are a couple of different bits at play here.
2699 			 * hw->reset_ongoing indicates whether the hardware is
2700 			 * in reset. This is set to true when a reset interrupt
2701 			 * is received and set back to false after the driver
2702 			 * has determined that the hardware is out of reset.
2703 			 *
2704 			 * __ICE_RESET_OICR_RECV in pf->state indicates
2705 			 * that a post reset rebuild is required before the
2706 			 * driver is operational again. This is set above.
2707 			 *
2708 			 * As this is the start of the reset/rebuild cycle, set
2709 			 * both to indicate that.
2710 			 */
2711 			hw->reset_ongoing = true;
2712 		}
2713 	}
2714 
2715 	if (oicr & PFINT_OICR_HMC_ERR_M) {
2716 		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
2717 		dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
2718 			rd32(hw, PFHMC_ERRORINFO),
2719 			rd32(hw, PFHMC_ERRORDATA));
2720 	}
2721 
2722 	/* Report any remaining unexpected interrupts */
2723 	oicr &= ena_mask;
2724 	if (oicr) {
2725 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2726 		/* If a critical error is pending there is no choice but to
2727 		 * reset the device.
2728 		 */
2729 		if (oicr & (PFINT_OICR_PE_CRITERR_M |
2730 			    PFINT_OICR_PCI_EXCEPTION_M |
2731 			    PFINT_OICR_ECC_ERR_M)) {
2732 			set_bit(__ICE_PFR_REQ, pf->state);
2733 			ice_service_task_schedule(pf);
2734 		}
2735 	}
2736 	ret = IRQ_HANDLED;
2737 
2738 	ice_service_task_schedule(pf);
2739 	ice_irq_dynamic_ena(hw, NULL, NULL);
2740 
2741 	return ret;
2742 }
2743 
2744 /**
2745  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2746  * @hw: pointer to HW structure
2747  */
2748 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2749 {
2750 	/* disable Admin queue Interrupt causes */
2751 	wr32(hw, PFINT_FW_CTL,
2752 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2753 
2754 	/* disable Mailbox queue Interrupt causes */
2755 	wr32(hw, PFINT_MBX_CTL,
2756 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2757 
2758 	/* disable Control queue Interrupt causes */
2759 	wr32(hw, PFINT_OICR_CTL,
2760 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2761 
2762 	ice_flush(hw);
2763 }
2764 
2765 /**
2766  * ice_free_irq_msix_misc - Unroll misc vector setup
2767  * @pf: board private structure
2768  */
2769 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2770 {
2771 	struct ice_hw *hw = &pf->hw;
2772 
2773 	ice_dis_ctrlq_interrupts(hw);
2774 
2775 	/* disable OICR interrupt */
2776 	wr32(hw, PFINT_OICR_ENA, 0);
2777 	ice_flush(hw);
2778 
2779 	if (pf->msix_entries) {
2780 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2781 		devm_free_irq(ice_pf_to_dev(pf),
2782 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2783 	}
2784 
2785 	pf->num_avail_sw_msix += 1;
2786 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2787 }
2788 
2789 /**
2790  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2791  * @hw: pointer to HW structure
2792  * @reg_idx: HW vector index to associate the control queue interrupts with
2793  */
2794 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2795 {
2796 	u32 val;
2797 
2798 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2799 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2800 	wr32(hw, PFINT_OICR_CTL, val);
2801 
2802 	/* enable Admin queue Interrupt causes */
2803 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2804 	       PFINT_FW_CTL_CAUSE_ENA_M);
2805 	wr32(hw, PFINT_FW_CTL, val);
2806 
2807 	/* enable Mailbox queue Interrupt causes */
2808 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2809 	       PFINT_MBX_CTL_CAUSE_ENA_M);
2810 	wr32(hw, PFINT_MBX_CTL, val);
2811 
2812 	ice_flush(hw);
2813 }
2814 
2815 /**
2816  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2817  * @pf: board private structure
2818  *
2819  * This sets up the handler for MSIX 0, which is used to manage the
2820  * non-queue interrupts, e.g. AdminQ and errors. This is not used
2821  * when in MSI or Legacy interrupt mode.
2822  */
2823 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2824 {
2825 	struct device *dev = ice_pf_to_dev(pf);
2826 	struct ice_hw *hw = &pf->hw;
2827 	int oicr_idx, err = 0;
2828 
2829 	if (!pf->int_name[0])
2830 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2831 			 dev_driver_string(dev), dev_name(dev));
2832 
2833 	/* Do not request IRQ but do enable OICR interrupt since settings are
2834 	 * lost during reset. Note that this function is called only during
2835 	 * rebuild path and not while reset is in progress.
2836 	 */
2837 	if (ice_is_reset_in_progress(pf->state))
2838 		goto skip_req_irq;
2839 
2840 	/* reserve one vector in irq_tracker for misc interrupts */
2841 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2842 	if (oicr_idx < 0)
2843 		return oicr_idx;
2844 
2845 	pf->num_avail_sw_msix -= 1;
2846 	pf->oicr_idx = (u16)oicr_idx;
2847 
2848 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2849 			       ice_misc_intr, 0, pf->int_name, pf);
2850 	if (err) {
2851 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2852 			pf->int_name, err);
2853 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2854 		pf->num_avail_sw_msix += 1;
2855 		return err;
2856 	}
2857 
2858 skip_req_irq:
2859 	ice_ena_misc_vector(pf);
2860 
2861 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
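	/* Throttle the misc vector to roughly 8K interrupts per second; the
	 * ITR register takes the interval in hardware-granularity units.
	 */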
2862 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2863 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2864 
2865 	ice_flush(hw);
2866 	ice_irq_dynamic_ena(hw, NULL, NULL);
2867 
2868 	return 0;
2869 }
2870 
2871 /**
2872  * ice_napi_add - register NAPI handler for the VSI
2873  * @vsi: VSI for which NAPI handler is to be registered
2874  *
2875  * This function is only called in the driver's load path. Registering the NAPI
2876  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
2877  * reset/rebuild, etc.)
2878  */
2879 static void ice_napi_add(struct ice_vsi *vsi)
2880 {
2881 	int v_idx;
2882 
2883 	if (!vsi->netdev)
2884 		return;
2885 
2886 	ice_for_each_q_vector(vsi, v_idx)
2887 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
2888 			       ice_napi_poll, NAPI_POLL_WEIGHT);
2889 }
2890 
2891 /**
2892  * ice_set_ops - set netdev and ethtool ops for the given netdev
2893  * @netdev: netdev instance
2894  */
2895 static void ice_set_ops(struct net_device *netdev)
2896 {
2897 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2898 
2899 	if (ice_is_safe_mode(pf)) {
2900 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
2901 		ice_set_ethtool_safe_mode_ops(netdev);
2902 		return;
2903 	}
2904 
2905 	netdev->netdev_ops = &ice_netdev_ops;
2906 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
2907 	ice_set_ethtool_ops(netdev);
2908 }
2909 
2910 /**
2911  * ice_set_netdev_features - set features for the given netdev
2912  * @netdev: netdev instance
2913  */
2914 static void ice_set_netdev_features(struct net_device *netdev)
2915 {
2916 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2917 	netdev_features_t csumo_features;
2918 	netdev_features_t vlano_features;
2919 	netdev_features_t dflt_features;
2920 	netdev_features_t tso_features;
2921 
2922 	if (ice_is_safe_mode(pf)) {
2923 		/* safe mode */
2924 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
2925 		netdev->hw_features = netdev->features;
2926 		return;
2927 	}
2928 
2929 	dflt_features = NETIF_F_SG	|
2930 			NETIF_F_HIGHDMA	|
2931 			NETIF_F_NTUPLE	|
2932 			NETIF_F_RXHASH;
2933 
2934 	csumo_features = NETIF_F_RXCSUM	  |
2935 			 NETIF_F_IP_CSUM  |
2936 			 NETIF_F_SCTP_CRC |
2937 			 NETIF_F_IPV6_CSUM;
2938 
2939 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2940 			 NETIF_F_HW_VLAN_CTAG_TX     |
2941 			 NETIF_F_HW_VLAN_CTAG_RX;
2942 
2943 	tso_features = NETIF_F_TSO			|
2944 		       NETIF_F_TSO_ECN			|
2945 		       NETIF_F_TSO6			|
2946 		       NETIF_F_GSO_GRE			|
2947 		       NETIF_F_GSO_UDP_TUNNEL		|
2948 		       NETIF_F_GSO_GRE_CSUM		|
2949 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
2950 		       NETIF_F_GSO_PARTIAL		|
2951 		       NETIF_F_GSO_IPXIP4		|
2952 		       NETIF_F_GSO_IPXIP6		|
2953 		       NETIF_F_GSO_UDP_L4;
2954 
2955 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
2956 					NETIF_F_GSO_GRE_CSUM;
2957 	/* set features that user can change */
2958 	netdev->hw_features = dflt_features | csumo_features |
2959 			      vlano_features | tso_features;
2960 
2961 	/* add support for HW_CSUM on packets with MPLS header */
2962 	netdev->mpls_features = NETIF_F_HW_CSUM;
2963 
2964 	/* enable features */
2965 	netdev->features |= netdev->hw_features;
2966 	/* encap and VLAN devices inherit default, csumo and tso features */
2967 	netdev->hw_enc_features |= dflt_features | csumo_features |
2968 				   tso_features;
2969 	netdev->vlan_features |= dflt_features | csumo_features |
2970 				 tso_features;
2971 }
2972 
2973 /**
2974  * ice_cfg_netdev - Allocate, configure and register a netdev
2975  * @vsi: the VSI associated with the new netdev
2976  *
2977  * Returns 0 on success, negative value on failure
2978  */
2979 static int ice_cfg_netdev(struct ice_vsi *vsi)
2980 {
2981 	struct ice_pf *pf = vsi->back;
2982 	struct ice_netdev_priv *np;
2983 	struct net_device *netdev;
2984 	u8 mac_addr[ETH_ALEN];
2985 	int err;
2986 
2987 	err = ice_devlink_create_port(vsi);
2988 	if (err)
2989 		return err;
2990 
2991 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
2992 				    vsi->alloc_rxq);
2993 	if (!netdev) {
2994 		err = -ENOMEM;
2995 		goto err_destroy_devlink_port;
2996 	}
2997 
2998 	vsi->netdev = netdev;
2999 	np = netdev_priv(netdev);
3000 	np->vsi = vsi;
3001 
3002 	ice_set_netdev_features(netdev);
3003 
3004 	ice_set_ops(netdev);
3005 
3006 	if (vsi->type == ICE_VSI_PF) {
3007 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
3008 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3009 		ether_addr_copy(netdev->dev_addr, mac_addr);
3010 		ether_addr_copy(netdev->perm_addr, mac_addr);
3011 	}
3012 
3013 	netdev->priv_flags |= IFF_UNICAST_FLT;
3014 
3015 	/* Setup netdev TC information */
3016 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3017 
3018 	/* set up watchdog timeout value to be 5 seconds */
3019 	netdev->watchdog_timeo = 5 * HZ;
3020 
3021 	netdev->min_mtu = ETH_MIN_MTU;
3022 	netdev->max_mtu = ICE_MAX_MTU;
3023 
3024 	err = register_netdev(vsi->netdev);
3025 	if (err)
3026 		goto err_free_netdev;
3027 
3028 	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
3029 
3030 	netif_carrier_off(vsi->netdev);
3031 
3032 	/* make sure transmit queues start off as stopped */
3033 	netif_tx_stop_all_queues(vsi->netdev);
3034 
3035 	return 0;
3036 
3037 err_free_netdev:
3038 	free_netdev(vsi->netdev);
3039 	vsi->netdev = NULL;
3040 err_destroy_devlink_port:
3041 	ice_devlink_destroy_port(vsi);
3042 	return err;
3043 }
3044 
3045 /**
3046  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3047  * @lut: Lookup table
3048  * @rss_table_size: Lookup table size
3049  * @rss_size: Range of queue number for hashing
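 *
 * For example, with rss_table_size = 8 and rss_size = 3 the table becomes
 * { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading hash buckets round-robin across
 * the Rx queues.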
3050  */
3051 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3052 {
3053 	u16 i;
3054 
3055 	for (i = 0; i < rss_table_size; i++)
3056 		lut[i] = i % rss_size;
3057 }
3058 
3059 /**
3060  * ice_pf_vsi_setup - Set up a PF VSI
3061  * @pf: board private structure
3062  * @pi: pointer to the port_info instance
3063  *
3064  * Returns pointer to the successfully allocated VSI software struct
3065  * on success, otherwise returns NULL on failure.
3066  */
3067 static struct ice_vsi *
3068 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3069 {
3070 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3071 }
3072 
3073 /**
3074  * ice_ctrl_vsi_setup - Set up a control VSI
3075  * @pf: board private structure
3076  * @pi: pointer to the port_info instance
3077  *
3078  * Returns pointer to the successfully allocated VSI software struct
3079  * on success, otherwise returns NULL on failure.
3080  */
3081 static struct ice_vsi *
3082 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3083 {
3084 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3085 }
3086 
3087 /**
3088  * ice_lb_vsi_setup - Set up a loopback VSI
3089  * @pf: board private structure
3090  * @pi: pointer to the port_info instance
3091  *
3092  * Returns pointer to the successfully allocated VSI software struct
3093  * on success, otherwise returns NULL on failure.
3094  */
3095 struct ice_vsi *
3096 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3097 {
3098 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3099 }
3100 
3101 /**
3102  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3103  * @netdev: network interface to be adjusted
3104  * @proto: unused protocol
3105  * @vid: VLAN ID to be added
3106  *
3107  * net_device_ops implementation for adding VLAN IDs
3108  */
3109 static int
3110 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3111 		    u16 vid)
3112 {
3113 	struct ice_netdev_priv *np = netdev_priv(netdev);
3114 	struct ice_vsi *vsi = np->vsi;
3115 	int ret;
3116 
3117 	if (vid >= VLAN_N_VID) {
3118 		netdev_err(netdev, "VLAN ID requested %d is out of range %d\n",
3119 			   vid, VLAN_N_VID);
3120 		return -EINVAL;
3121 	}
3122 
3123 	if (vsi->info.pvid)
3124 		return -EINVAL;
3125 
3126 	/* VLAN 0 is added by default during load/reset */
3127 	if (!vid)
3128 		return 0;
3129 
3130 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3131 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3132 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3133 		if (ret)
3134 			return ret;
3135 	}
3136 
3137 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3138 	 * packets aren't pruned by the device's internal switch on Rx
3139 	 */
3140 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3141 	if (!ret)
3142 		set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3143 
3144 	return ret;
3145 }
3146 
3147 /**
3148  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3149  * @netdev: network interface to be adjusted
3150  * @proto: unused protocol
3151  * @vid: VLAN ID to be removed
3152  *
3153  * net_device_ops implementation for removing VLAN IDs
3154  */
3155 static int
3156 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3157 		     u16 vid)
3158 {
3159 	struct ice_netdev_priv *np = netdev_priv(netdev);
3160 	struct ice_vsi *vsi = np->vsi;
3161 	int ret;
3162 
3163 	if (vsi->info.pvid)
3164 		return -EINVAL;
3165 
3166 	/* don't allow removal of VLAN 0 */
3167 	if (!vid)
3168 		return 0;
3169 
3170 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3171 	 * information
3172 	 */
3173 	ret = ice_vsi_kill_vlan(vsi, vid);
3174 	if (ret)
3175 		return ret;
3176 
3177 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3178 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3179 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3180 
3181 	set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3182 	return ret;
3183 }
3184 
3185 /**
3186  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3187  * @pf: board private structure
3188  *
3189  * Returns 0 on success, negative value on failure
3190  */
3191 static int ice_setup_pf_sw(struct ice_pf *pf)
3192 {
3193 	struct ice_vsi *vsi;
3194 	int status = 0;
3195 
3196 	if (ice_is_reset_in_progress(pf->state))
3197 		return -EBUSY;
3198 
3199 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3200 	if (!vsi)
3201 		return -ENOMEM;
3202 
3203 	status = ice_cfg_netdev(vsi);
3204 	if (status) {
3205 		status = -ENODEV;
3206 		goto unroll_vsi_setup;
3207 	}
3208 	/* netdev has to be configured before setting frame size */
3209 	ice_vsi_cfg_frame_size(vsi);
3210 
3211 	/* Setup DCB netlink interface */
3212 	ice_dcbnl_setup(vsi);
3213 
3214 	/* registering the NAPI handler requires both the queues and
3215 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3216 	 * and ice_cfg_netdev() respectively
3217 	 */
3218 	ice_napi_add(vsi);
3219 
3220 	status = ice_set_cpu_rx_rmap(vsi);
3221 	if (status) {
3222 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3223 			vsi->vsi_num, status);
3224 		status = -EINVAL;
3225 		goto unroll_napi_add;
3226 	}
3227 	status = ice_init_mac_fltr(pf);
3228 	if (status)
3229 		goto free_cpu_rx_map;
3230 
3231 	return status;
3232 
3233 free_cpu_rx_map:
3234 	ice_free_cpu_rx_rmap(vsi);
3235 
3236 unroll_napi_add:
3237 	if (vsi) {
3238 		ice_napi_del(vsi);
3239 		if (vsi->netdev) {
3240 			if (vsi->netdev->reg_state == NETREG_REGISTERED)
3241 				unregister_netdev(vsi->netdev);
3242 			free_netdev(vsi->netdev);
3243 			vsi->netdev = NULL;
3244 		}
3245 	}
3246 
3247 unroll_vsi_setup:
3248 	ice_vsi_release(vsi);
3249 	return status;
3250 }
3251 
3252 /**
3253  * ice_get_avail_q_count - Get count of queues in use
3254  * @pf_qmap: bitmap to get queue use count from
3255  * @lock: pointer to a mutex that protects access to pf_qmap
3256  * @size: size of the bitmap
3257  */
3258 static u16
3259 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3260 {
3261 	unsigned long bit;
3262 	u16 count = 0;
3263 
3264 	mutex_lock(lock);
3265 	for_each_clear_bit(bit, pf_qmap, size)
3266 		count++;
3267 	mutex_unlock(lock);
3268 
3269 	return count;
3270 }
3271 
3272 /**
3273  * ice_get_avail_txq_count - Get count of Tx queues in use
3274  * @pf: pointer to an ice_pf instance
3275  */
3276 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3277 {
3278 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3279 				     pf->max_pf_txqs);
3280 }
3281 
3282 /**
3283  * ice_get_avail_rxq_count - Get count of Rx queues in use
3284  * @pf: pointer to an ice_pf instance
3285  */
3286 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3287 {
3288 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3289 				     pf->max_pf_rxqs);
3290 }
3291 
3292 /**
3293  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3294  * @pf: board private structure to initialize
3295  */
3296 static void ice_deinit_pf(struct ice_pf *pf)
3297 {
3298 	ice_service_task_stop(pf);
3299 	mutex_destroy(&pf->sw_mutex);
3300 	mutex_destroy(&pf->tc_mutex);
3301 	mutex_destroy(&pf->avail_q_mutex);
3302 
3303 	if (pf->avail_txqs) {
3304 		bitmap_free(pf->avail_txqs);
3305 		pf->avail_txqs = NULL;
3306 	}
3307 
3308 	if (pf->avail_rxqs) {
3309 		bitmap_free(pf->avail_rxqs);
3310 		pf->avail_rxqs = NULL;
3311 	}
3312 }
3313 
3314 /**
3315  * ice_set_pf_caps - set PFs capability flags
3316  * @pf: pointer to the PF instance
3317  */
3318 static void ice_set_pf_caps(struct ice_pf *pf)
3319 {
3320 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3321 
3322 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3323 	if (func_caps->common_cap.dcb)
3324 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3325 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3326 	if (func_caps->common_cap.sr_iov_1_1) {
3327 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3328 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3329 					      ICE_MAX_VF_COUNT);
3330 	}
3331 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3332 	if (func_caps->common_cap.rss_table_size)
3333 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3334 
3335 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3336 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3337 		u16 unused;
3338 
3339 		/* ctrl_vsi_idx will be set to a valid value when flow director
3340 		 * is setup by ice_init_fdir
3341 		 */
3342 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3343 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3344 		/* force guaranteed filter pool for PF */
3345 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3346 				       func_caps->fd_fltr_guar);
3347 		/* force shared filter pool for PF */
3348 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3349 				       func_caps->fd_fltr_best_effort);
3350 	}
3351 
3352 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3353 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3354 }
3355 
3356 /**
3357  * ice_init_pf - Initialize general software structures (struct ice_pf)
3358  * @pf: board private structure to initialize
3359  */
3360 static int ice_init_pf(struct ice_pf *pf)
3361 {
3362 	ice_set_pf_caps(pf);
3363 
3364 	mutex_init(&pf->sw_mutex);
3365 	mutex_init(&pf->tc_mutex);
3366 
3367 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3368 	spin_lock_init(&pf->aq_wait_lock);
3369 	init_waitqueue_head(&pf->aq_wait_queue);
3370 
3371 	/* setup service timer and periodic service task */
3372 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3373 	pf->serv_tmr_period = HZ;
3374 	INIT_WORK(&pf->serv_task, ice_service_task);
3375 	clear_bit(__ICE_SERVICE_SCHED, pf->state);
3376 
3377 	mutex_init(&pf->avail_q_mutex);
3378 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3379 	if (!pf->avail_txqs)
3380 		return -ENOMEM;
3381 
3382 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3383 	if (!pf->avail_rxqs) {
3384 		bitmap_free(pf->avail_txqs);
3385 		pf->avail_txqs = NULL;
3386 		return -ENOMEM;
3387 	}
3388 
3389 	return 0;
3390 }
3391 
3392 /**
3393  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3394  * @pf: board private structure
3395  *
3396  * Compute the number of MSI-X vectors required (v_budget) and request them
3397  * from the OS. Return the number of vectors reserved, or negative on failure.
3398  */
3399 static int ice_ena_msix_range(struct ice_pf *pf)
3400 {
3401 	int v_left, v_actual, v_other, v_budget = 0;
3402 	struct device *dev = ice_pf_to_dev(pf);
3403 	int needed, err, i;
3404 
3405 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3406 
3407 	/* reserve for LAN miscellaneous handler */
3408 	needed = ICE_MIN_LAN_OICR_MSIX;
3409 	if (v_left < needed)
3410 		goto no_hw_vecs_left_err;
3411 	v_budget += needed;
3412 	v_left -= needed;
3413 
3414 	/* reserve for flow director */
3415 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3416 		needed = ICE_FDIR_MSIX;
3417 		if (v_left < needed)
3418 			goto no_hw_vecs_left_err;
3419 		v_budget += needed;
3420 		v_left -= needed;
3421 	}
3422 
3423 	/* total used for non-traffic vectors */
3424 	v_other = v_budget;
3425 
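	/* Example budget, assuming ICE_MIN_LAN_OICR_MSIX and ICE_FDIR_MSIX are
	 * each 1: a 16-CPU system with flow director enabled has v_other = 2,
	 * and the LAN reservation below brings v_budget to 18.
	 */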
3426 	/* reserve vectors for LAN traffic */
3427 	needed = min_t(int, num_online_cpus(), v_left);
3428 	if (v_left < needed)
3429 		goto no_hw_vecs_left_err;
3430 	pf->num_lan_msix = needed;
3431 	v_budget += needed;
3432 	v_left -= needed;
3433 
3434 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3435 					sizeof(*pf->msix_entries), GFP_KERNEL);
3436 	if (!pf->msix_entries) {
3437 		err = -ENOMEM;
3438 		goto exit_err;
3439 	}
3440 
3441 	for (i = 0; i < v_budget; i++)
3442 		pf->msix_entries[i].entry = i;
3443 
3444 	/* actually reserve the vectors */
3445 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3446 					 ICE_MIN_MSIX, v_budget);
3447 	if (v_actual < 0) {
3448 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3449 		err = v_actual;
3450 		goto msix_err;
3451 	}
3452 
3453 	if (v_actual < v_budget) {
3454 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3455 			 v_budget, v_actual);
3456 
3457 		if (v_actual < ICE_MIN_MSIX) {
3458 			/* error if we can't get minimum vectors */
3459 			pci_disable_msix(pf->pdev);
3460 			err = -ERANGE;
3461 			goto msix_err;
3462 		} else {
3463 			int v_traffic = v_actual - v_other;
3464 
3465 			if (v_actual == ICE_MIN_MSIX ||
3466 			    v_traffic < ICE_MIN_LAN_TXRX_MSIX)
3467 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3468 			else
3469 				pf->num_lan_msix = v_traffic;
3470 
3471 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3472 				   pf->num_lan_msix);
3473 		}
3474 	}
3475 
3476 	return v_actual;
3477 
3478 msix_err:
3479 	devm_kfree(dev, pf->msix_entries);
3480 	goto exit_err;
3481 
3482 no_hw_vecs_left_err:
3483 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3484 		needed, v_left);
3485 	err = -ERANGE;
3486 exit_err:
3487 	pf->num_lan_msix = 0;
3488 	return err;
3489 }
3490 
3491 /**
3492  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3493  * @pf: board private structure
3494  */
3495 static void ice_dis_msix(struct ice_pf *pf)
3496 {
3497 	pci_disable_msix(pf->pdev);
3498 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3499 	pf->msix_entries = NULL;
3500 }
3501 
3502 /**
3503  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3504  * @pf: board private structure
3505  */
3506 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3507 {
3508 	ice_dis_msix(pf);
3509 
3510 	if (pf->irq_tracker) {
3511 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3512 		pf->irq_tracker = NULL;
3513 	}
3514 }
3515 
3516 /**
3517  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3518  * @pf: board private structure to initialize
3519  */
3520 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3521 {
3522 	int vectors;
3523 
3524 	vectors = ice_ena_msix_range(pf);
3525 
3526 	if (vectors < 0)
3527 		return vectors;
3528 
3529 	/* set up vector assignment tracking */
3530 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3531 				       struct_size(pf->irq_tracker, list, vectors),
3532 				       GFP_KERNEL);
3533 	if (!pf->irq_tracker) {
3534 		ice_dis_msix(pf);
3535 		return -ENOMEM;
3536 	}
3537 
3538 	/* populate SW interrupts pool with number of OS granted IRQs. */
3539 	pf->num_avail_sw_msix = (u16)vectors;
3540 	pf->irq_tracker->num_entries = (u16)vectors;
3541 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3542 
3543 	return 0;
3544 }
3545 
3546 /**
3547  * ice_is_wol_supported - get NVM state of WoL
3548  * @pf: board private structure
3549  *
3550  * Check if WoL is supported based on the HW configuration.
3551  * Returns true if the NVM supports and enables WoL for this port, false otherwise
3552  */
3553 bool ice_is_wol_supported(struct ice_pf *pf)
3554 {
3555 	struct ice_hw *hw = &pf->hw;
3556 	u16 wol_ctrl;
3557 
3558 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3559 	 * word) indicates WoL is not supported on the corresponding PF ID.
3560 	 */
3561 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3562 		return false;
3563 
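	/* e.g. a (hypothetical) wol_ctrl of 0x0002 would disable WoL only
	 * on PF ID 1; note the inverted sense of the bit test below
	 */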
3564 	return !(BIT(hw->pf_id) & wol_ctrl);
3565 }
3566 
3567 /**
3568  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3569  * @vsi: VSI being changed
3570  * @new_rx: new number of Rx queues
3571  * @new_tx: new number of Tx queues
3572  *
3573  * Only change the number of queues if new_tx or new_rx is non-zero.
3574  *
3575  * Returns 0 on success.
3576  */
3577 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3578 {
3579 	struct ice_pf *pf = vsi->back;
3580 	int err = 0, timeout = 50;
3581 
3582 	if (!new_rx && !new_tx)
3583 		return -EINVAL;
3584 
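	/* __ICE_CFG_BUSY serializes configuration changes; poll for up to
	 * ~50-100 ms before giving up with -EBUSY
	 */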
3585 	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
3586 		timeout--;
3587 		if (!timeout)
3588 			return -EBUSY;
3589 		usleep_range(1000, 2000);
3590 	}
3591 
3592 	if (new_tx)
3593 		vsi->req_txq = (u16)new_tx;
3594 	if (new_rx)
3595 		vsi->req_rxq = (u16)new_rx;
3596 
3597 	/* set for the next time the netdev is started */
3598 	if (!netif_running(vsi->netdev)) {
3599 		ice_vsi_rebuild(vsi, false);
3600 		dev_dbg(ice_pf_to_dev(pf), "Interface is down, queue count change happens when the interface is brought up\n");
3601 		goto done;
3602 	}
3603 
3604 	ice_vsi_close(vsi);
3605 	ice_vsi_rebuild(vsi, false);
3606 	ice_pf_dcb_recfg(pf);
3607 	ice_vsi_open(vsi);
3608 done:
3609 	clear_bit(__ICE_CFG_BUSY, pf->state);
3610 	return err;
3611 }
3612 
3613 /**
3614  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3615  * @pf: PF to configure
3616  *
3617  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3618  * VSI can still Tx/Rx VLAN tagged packets.
3619  */
3620 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3621 {
3622 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3623 	struct ice_vsi_ctx *ctxt;
3624 	enum ice_status status;
3625 	struct ice_hw *hw;
3626 
3627 	if (!vsi)
3628 		return;
3629 
3630 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3631 	if (!ctxt)
3632 		return;
3633 
3634 	hw = &pf->hw;
3635 	ctxt->info = vsi->info;
3636 
3637 	ctxt->info.valid_sections =
3638 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3639 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3640 			    ICE_AQ_VSI_PROP_SW_VALID);
3641 
3642 	/* disable VLAN anti-spoof */
3643 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3644 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3645 
3646 	/* disable VLAN pruning and keep all other settings */
3647 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3648 
3649 	/* allow all VLANs on Tx and don't strip on Rx */
3650 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3651 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3652 
3653 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3654 	if (status) {
3655 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3656 			ice_stat_str(status),
3657 			ice_aq_str(hw->adminq.sq_last_status));
3658 	} else {
3659 		vsi->info.sec_flags = ctxt->info.sec_flags;
3660 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3661 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3662 	}
3663 
3664 	kfree(ctxt);
3665 }
3666 
3667 /**
3668  * ice_log_pkg_init - log result of DDP package load
3669  * @hw: pointer to hardware info
3670  * @status: status of package load
3671  */
3672 static void
3673 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3674 {
3675 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3676 	struct device *dev = ice_pf_to_dev(pf);
3677 
3678 	switch (*status) {
3679 	case ICE_SUCCESS:
3680 		/* Either the package download succeeded, or the AdminQ command
3681 		 * returned ICE_ERR_AQ_NO_WORK because a package is already
3682 		 * loaded on the device; both cases report overall success.
3683 		 */
3684 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3685 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3686 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3687 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3688 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3689 			    sizeof(hw->pkg_name))) {
3690 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3691 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3692 					 hw->active_pkg_name,
3693 					 hw->active_pkg_ver.major,
3694 					 hw->active_pkg_ver.minor,
3695 					 hw->active_pkg_ver.update,
3696 					 hw->active_pkg_ver.draft);
3697 			else
3698 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3699 					 hw->active_pkg_name,
3700 					 hw->active_pkg_ver.major,
3701 					 hw->active_pkg_ver.minor,
3702 					 hw->active_pkg_ver.update,
3703 					 hw->active_pkg_ver.draft);
3704 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3705 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3706 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3707 				hw->active_pkg_name,
3708 				hw->active_pkg_ver.major,
3709 				hw->active_pkg_ver.minor,
3710 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3711 			*status = ICE_ERR_NOT_SUPPORTED;
3712 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3713 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3714 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3715 				 hw->active_pkg_name,
3716 				 hw->active_pkg_ver.major,
3717 				 hw->active_pkg_ver.minor,
3718 				 hw->active_pkg_ver.update,
3719 				 hw->active_pkg_ver.draft,
3720 				 hw->pkg_name,
3721 				 hw->pkg_ver.major,
3722 				 hw->pkg_ver.minor,
3723 				 hw->pkg_ver.update,
3724 				 hw->pkg_ver.draft);
3725 		} else {
3726 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3727 			*status = ICE_ERR_NOT_SUPPORTED;
3728 		}
3729 		break;
3730 	case ICE_ERR_FW_DDP_MISMATCH:
3731 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3732 		break;
3733 	case ICE_ERR_BUF_TOO_SHORT:
3734 	case ICE_ERR_CFG:
3735 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3736 		break;
3737 	case ICE_ERR_NOT_SUPPORTED:
3738 		/* Package File version not supported */
3739 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3740 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3741 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3742 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3743 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3744 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3745 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3746 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3747 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3748 		break;
3749 	case ICE_ERR_AQ_ERROR:
3750 		switch (hw->pkg_dwnld_status) {
3751 		case ICE_AQ_RC_ENOSEC:
3752 		case ICE_AQ_RC_EBADSIG:
3753 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3754 			return;
3755 		case ICE_AQ_RC_ESVN:
3756 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3757 			return;
3758 		case ICE_AQ_RC_EBADMAN:
3759 		case ICE_AQ_RC_EBADBUF:
3760 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3761 			/* poll for reset to complete */
3762 			if (ice_check_reset(hw))
3763 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3764 			return;
3765 		default:
3766 			break;
3767 		}
3768 		fallthrough;
3769 	default:
3770 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3771 			*status);
3772 		break;
3773 	}
3774 }
3775 
3776 /**
3777  * ice_load_pkg - load/reload the DDP Package file
3778  * @firmware: firmware structure when firmware requested or NULL for reload
3779  * @pf: pointer to the PF instance
3780  *
3781  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3782  * initialize HW tables.
3783  */
3784 static void
3785 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3786 {
3787 	enum ice_status status = ICE_ERR_PARAM;
3788 	struct device *dev = ice_pf_to_dev(pf);
3789 	struct ice_hw *hw = &pf->hw;
3790 
3791 	/* Load DDP Package */
3792 	if (firmware && !hw->pkg_copy) {
3793 		status = ice_copy_and_init_pkg(hw, firmware->data,
3794 					       firmware->size);
3795 		ice_log_pkg_init(hw, &status);
3796 	} else if (!firmware && hw->pkg_copy) {
3797 		/* Reload package during rebuild after CORER/GLOBR reset */
3798 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3799 		ice_log_pkg_init(hw, &status);
3800 	} else {
3801 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3802 	}
3803 
3804 	if (status) {
3805 		/* Safe Mode */
3806 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3807 		return;
3808 	}
3809 
3810 	/* A successful package download is the precondition for advanced
3811 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
3812 	 */
3813 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3814 }
3815 
3816 /**
3817  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3818  * @pf: pointer to the PF structure
3819  *
3820  * There is no error returned here because the driver should be able to handle
3821  * 128 Byte cache lines, so we only print a warning in case issues are seen,
3822  * specifically with Tx.
3823  */
3824 static void ice_verify_cacheline_size(struct ice_pf *pf)
3825 {
3826 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3827 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3828 			 ICE_CACHE_LINE_BYTES);
3829 }
3830 
3831 /**
3832  * ice_send_version - update firmware with driver version
3833  * @pf: PF struct
3834  *
3835  * Returns ICE_SUCCESS on success, else error code
3836  */
3837 static enum ice_status ice_send_version(struct ice_pf *pf)
3838 {
3839 	struct ice_driver_ver dv;
3840 
3841 	dv.major_ver = 0xff;
3842 	dv.minor_ver = 0xff;
3843 	dv.build_ver = 0xff;
3844 	dv.subbuild_ver = 0;
3845 	strscpy((char *)dv.driver_string, UTS_RELEASE,
3846 		sizeof(dv.driver_string));
3847 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3848 }
3849 
3850 /**
3851  * ice_init_fdir - Initialize flow director VSI and configuration
3852  * @pf: pointer to the PF instance
3853  *
3854  * returns 0 on success, negative on error
3855  */
3856 static int ice_init_fdir(struct ice_pf *pf)
3857 {
3858 	struct device *dev = ice_pf_to_dev(pf);
3859 	struct ice_vsi *ctrl_vsi;
3860 	int err;
3861 
3862 	/* Side Band Flow Director needs to have a control VSI.
3863 	 * Allocate it and store it in the PF.
3864 	 */
3865 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
3866 	if (!ctrl_vsi) {
3867 		dev_dbg(dev, "could not create control VSI\n");
3868 		return -ENOMEM;
3869 	}
3870 
3871 	err = ice_vsi_open_ctrl(ctrl_vsi);
3872 	if (err) {
3873 		dev_dbg(dev, "could not open control VSI\n");
3874 		goto err_vsi_open;
3875 	}
3876 
3877 	mutex_init(&pf->hw.fdir_fltr_lock);
3878 
3879 	err = ice_fdir_create_dflt_rules(pf);
3880 	if (err)
3881 		goto err_fdir_rule;
3882 
3883 	return 0;
3884 
3885 err_fdir_rule:
3886 	ice_fdir_release_flows(&pf->hw);
3887 	ice_vsi_close(ctrl_vsi);
3888 err_vsi_open:
3889 	ice_vsi_release(ctrl_vsi);
3890 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
3891 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
3892 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3893 	}
3894 	return err;
3895 }
3896 
3897 /**
3898  * ice_get_opt_fw_name - return optional firmware file name or NULL
3899  * @pf: pointer to the PF instance
3900  */
3901 static char *ice_get_opt_fw_name(struct ice_pf *pf)
3902 {
3903 	/* Optional firmware name is the same as the default, with an added
3904 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
3905 	 */
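	/* e.g. "intel/ice/ddp/ice-0123456789abcdef.pkg" for a hypothetical
	 * DSN of 0x0123456789abcdef
	 */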
3906 	struct pci_dev *pdev = pf->pdev;
3907 	char *opt_fw_filename;
3908 	u64 dsn;
3909 
3910 	/* Determine the name of the optional file using the DSN (two
3911 	 * dwords following the start of the DSN Capability).
3912 	 */
3913 	dsn = pci_get_dsn(pdev);
3914 	if (!dsn)
3915 		return NULL;
3916 
3917 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
3918 	if (!opt_fw_filename)
3919 		return NULL;
3920 
3921 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
3922 		 ICE_DDP_PKG_PATH, dsn);
3923 
3924 	return opt_fw_filename;
3925 }
3926 
3927 /**
3928  * ice_request_fw - request the DDP package file and load it
3929  * @pf: pointer to the PF instance
3930  */
3931 static void ice_request_fw(struct ice_pf *pf)
3932 {
3933 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
3934 	const struct firmware *firmware = NULL;
3935 	struct device *dev = ice_pf_to_dev(pf);
3936 	int err = 0;
3937 
3938 	/* An optional device-specific DDP package (if present) overrides the
3939 	 * default package file. The kernel logs a debug message if the file
3940 	 * doesn't exist and warning messages for other errors.
3941 	 */
3942 	if (opt_fw_filename) {
3943 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
3944 		if (err) {
3945 			kfree(opt_fw_filename);
3946 			goto dflt_pkg_load;
3947 		}
3948 
3949 		/* the firmware request succeeded; download the package to the device */
3950 		ice_load_pkg(firmware, pf);
3951 		kfree(opt_fw_filename);
3952 		release_firmware(firmware);
3953 		return;
3954 	}
3955 
3956 dflt_pkg_load:
3957 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
3958 	if (err) {
3959 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
3960 		return;
3961 	}
3962 
3963 	/* the firmware request succeeded; download the package to the device */
3964 	ice_load_pkg(firmware, pf);
3965 	release_firmware(firmware);
3966 }
3967 
3968 /**
3969  * ice_print_wake_reason - show the wake up cause in the log
3970  * @pf: pointer to the PF struct
3971  */
3972 static void ice_print_wake_reason(struct ice_pf *pf)
3973 {
3974 	u32 wus = pf->wakeup_reason;
3975 	const char *wake_str;
3976 
3977 	/* if no wake event, nothing to print */
3978 	if (!wus)
3979 		return;
3980 
3981 	if (wus & PFPM_WUS_LNKC_M)
3982 		wake_str = "Link\n";
3983 	else if (wus & PFPM_WUS_MAG_M)
3984 		wake_str = "Magic Packet\n";
3985 	else if (wus & PFPM_WUS_MNG_M)
3986 		wake_str = "Management\n";
3987 	else if (wus & PFPM_WUS_FW_RST_WK_M)
3988 		wake_str = "Firmware Reset\n";
3989 	else
3990 		wake_str = "Unknown\n";
3991 
3992 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
3993 }
3994 
3995 /**
3996  * ice_probe - Device initialization routine
3997  * @pdev: PCI device information struct
3998  * @ent: entry in ice_pci_tbl
3999  *
4000  * Returns 0 on success, negative on failure
4001  */
4002 static int
4003 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4004 {
4005 	struct device *dev = &pdev->dev;
4006 	struct ice_pf *pf;
4007 	struct ice_hw *hw;
4008 	int i, err;
4009 
4010 	/* this driver uses devres, see
4011 	 * Documentation/driver-api/driver-model/devres.rst
4012 	 */
4013 	err = pcim_enable_device(pdev);
4014 	if (err)
4015 		return err;
4016 
4017 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
4018 	if (err) {
4019 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4020 		return err;
4021 	}
4022 
4023 	pf = ice_allocate_pf(dev);
4024 	if (!pf)
4025 		return -ENOMEM;
4026 
4027 	/* set up for high or low DMA */
4028 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4029 	if (err)
4030 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4031 	if (err) {
4032 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4033 		return err;
4034 	}
4035 
4036 	pci_enable_pcie_error_reporting(pdev);
4037 	pci_set_master(pdev);
4038 
4039 	pf->pdev = pdev;
4040 	pci_set_drvdata(pdev, pf);
4041 	set_bit(__ICE_DOWN, pf->state);
4042 	/* Disable service task until DOWN bit is cleared */
4043 	set_bit(__ICE_SERVICE_DIS, pf->state);
4044 
4045 	hw = &pf->hw;
4046 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4047 	pci_save_state(pdev);
4048 
4049 	hw->back = pf;
4050 	hw->vendor_id = pdev->vendor;
4051 	hw->device_id = pdev->device;
4052 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4053 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4054 	hw->subsystem_device_id = pdev->subsystem_device;
4055 	hw->bus.device = PCI_SLOT(pdev->devfn);
4056 	hw->bus.func = PCI_FUNC(pdev->devfn);
4057 	ice_set_ctrlq_len(hw);
4058 
4059 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4060 
4061 	err = ice_devlink_register(pf);
4062 	if (err) {
4063 		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4064 		goto err_exit_unroll;
4065 	}
4066 
4067 #ifndef CONFIG_DYNAMIC_DEBUG
4068 	if (debug < -1)
4069 		hw->debug_mask = debug;
4070 #endif
4071 
4072 	err = ice_init_hw(hw);
4073 	if (err) {
4074 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4075 		err = -EIO;
4076 		goto err_exit_unroll;
4077 	}
4078 
4079 	ice_request_fw(pf);
4080 
4081 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4082 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4083 	 * true
4084 	 */
4085 	if (ice_is_safe_mode(pf)) {
4086 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4087 		/* we already got function/device capabilities but these don't
4088 		 * reflect what the driver needs to do in safe mode. Instead of
4089 		 * adding conditional logic everywhere to ignore these
4090 		 * device/function capabilities, override them.
4091 		 */
4092 		ice_set_safe_mode_caps(hw);
4093 	}
4094 
4095 	err = ice_init_pf(pf);
4096 	if (err) {
4097 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4098 		goto err_init_pf_unroll;
4099 	}
4100 
4101 	ice_devlink_init_regions(pf);
4102 
4103 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4104 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4105 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4106 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
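	/* fill the udp_tunnel_nic port tables in a fixed order: VXLAN first
	 * (when the DDP advertises VXLAN entries), then GENEVE
	 */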
4107 	i = 0;
4108 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4109 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4110 			pf->hw.tnl.valid_count[TNL_VXLAN];
4111 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4112 			UDP_TUNNEL_TYPE_VXLAN;
4113 		i++;
4114 	}
4115 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4116 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4117 			pf->hw.tnl.valid_count[TNL_GENEVE];
4118 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4119 			UDP_TUNNEL_TYPE_GENEVE;
4120 		i++;
4121 	}
4122 
4123 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4124 	if (!pf->num_alloc_vsi) {
4125 		err = -EIO;
4126 		goto err_init_pf_unroll;
4127 	}
4128 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4129 		dev_warn(&pf->pdev->dev,
4130 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4131 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4132 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4133 	}
4134 
4135 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4136 			       GFP_KERNEL);
4137 	if (!pf->vsi) {
4138 		err = -ENOMEM;
4139 		goto err_init_pf_unroll;
4140 	}
4141 
4142 	err = ice_init_interrupt_scheme(pf);
4143 	if (err) {
4144 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4145 		err = -EIO;
4146 		goto err_init_vsi_unroll;
4147 	}
4148 
4149 	/* In case of MSI-X we are going to set up the misc vector right here
4150 	 * to handle admin queue events etc. In case of legacy interrupts and
4151 	 * MSI, the misc functionality and queue processing are combined in
4152 	 * the same vector, and that gets set up at open.
4153 	 */
4154 	err = ice_req_irq_msix_misc(pf);
4155 	if (err) {
4156 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4157 		goto err_init_interrupt_unroll;
4158 	}
4159 
4160 	/* create switch struct for the switch element created by FW on boot */
4161 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4162 	if (!pf->first_sw) {
4163 		err = -ENOMEM;
4164 		goto err_msix_misc_unroll;
4165 	}
4166 
4167 	if (hw->evb_veb)
4168 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4169 	else
4170 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4171 
4172 	pf->first_sw->pf = pf;
4173 
4174 	/* record the sw_id available for later use */
4175 	pf->first_sw->sw_id = hw->port_info->sw_id;
4176 
4177 	err = ice_setup_pf_sw(pf);
4178 	if (err) {
4179 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4180 		goto err_alloc_sw_unroll;
4181 	}
4182 
4183 	clear_bit(__ICE_SERVICE_DIS, pf->state);
4184 
4185 	/* tell the firmware we are up */
4186 	err = ice_send_version(pf);
4187 	if (err) {
4188 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4189 			UTS_RELEASE, err);
4190 		goto err_send_version_unroll;
4191 	}
4192 
4193 	/* since everything is good, start the service timer */
4194 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4195 
4196 	err = ice_init_link_events(pf->hw.port_info);
4197 	if (err) {
4198 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4199 		goto err_send_version_unroll;
4200 	}
4201 
4202 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4203 	if (err) {
4204 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4205 		goto err_send_version_unroll;
4206 	}
4207 
4208 	err = ice_update_link_info(pf->hw.port_info);
4209 	if (err) {
4210 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4211 		goto err_send_version_unroll;
4212 	}
4213 
4214 	ice_init_link_dflt_override(pf->hw.port_info);
4215 
4216 	/* if media available, initialize PHY settings */
4217 	if (pf->hw.port_info->phy.link_info.link_info &
4218 	    ICE_AQ_MEDIA_AVAILABLE) {
4219 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4220 		if (err) {
4221 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4222 			goto err_send_version_unroll;
4223 		}
4224 
4225 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4226 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4227 
4228 			if (vsi)
4229 				ice_configure_phy(vsi);
4230 		}
4231 	} else {
4232 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4233 	}
4234 
4235 	ice_verify_cacheline_size(pf);
4236 
4237 	/* Save wakeup reason register for later use */
4238 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4239 
4240 	/* check for a power management event */
4241 	ice_print_wake_reason(pf);
4242 
4243 	/* clear wake status, all bits */
4244 	wr32(hw, PFPM_WUS, U32_MAX);
4245 
4246 	/* Disable WoL at init, wait for user to enable */
4247 	device_set_wakeup_enable(dev, false);
4248 
4249 	if (ice_is_safe_mode(pf)) {
4250 		ice_set_safe_mode_vlan_cfg(pf);
4251 		goto probe_done;
4252 	}
4253 
4254 	/* initialize DDP driven features */
4255 
4256 	/* Note: Flow director init failure is non-fatal to load */
4257 	if (ice_init_fdir(pf))
4258 		dev_err(dev, "could not initialize flow director\n");
4259 
4260 	/* Note: DCB init failure is non-fatal to load */
4261 	if (ice_init_pf_dcb(pf, false)) {
4262 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4263 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4264 	} else {
4265 		ice_cfg_lldp_mib_change(&pf->hw, true);
4266 	}
4267 
4268 	if (ice_init_lag(pf))
4269 		dev_warn(dev, "Failed to init link aggregation support\n");
4270 
4271 	/* print PCI link speed and width */
4272 	pcie_print_link_status(pf->pdev);
4273 
4274 probe_done:
4275 	/* ready to go, so clear down state bit */
4276 	clear_bit(__ICE_DOWN, pf->state);
4277 	return 0;
4278 
4279 err_send_version_unroll:
4280 	ice_vsi_release_all(pf);
4281 err_alloc_sw_unroll:
4282 	set_bit(__ICE_SERVICE_DIS, pf->state);
4283 	set_bit(__ICE_DOWN, pf->state);
4284 	devm_kfree(dev, pf->first_sw);
4285 err_msix_misc_unroll:
4286 	ice_free_irq_msix_misc(pf);
4287 err_init_interrupt_unroll:
4288 	ice_clear_interrupt_scheme(pf);
4289 err_init_vsi_unroll:
4290 	devm_kfree(dev, pf->vsi);
4291 err_init_pf_unroll:
4292 	ice_deinit_pf(pf);
4293 	ice_devlink_destroy_regions(pf);
4294 	ice_deinit_hw(hw);
4295 err_exit_unroll:
4296 	ice_devlink_unregister(pf);
4297 	pci_disable_pcie_error_reporting(pdev);
4298 	pci_disable_device(pdev);
4299 	return err;
4300 }
4301 
4302 /**
4303  * ice_set_wake - enable or disable Wake on LAN
4304  * @pf: pointer to the PF struct
4305  *
4306  * Simple helper for WoL control
4307  */
4308 static void ice_set_wake(struct ice_pf *pf)
4309 {
4310 	struct ice_hw *hw = &pf->hw;
4311 	bool wol = pf->wol_ena;
4312 
4313 	/* clear wake state, otherwise new wake events won't fire */
4314 	wr32(hw, PFPM_WUS, U32_MAX);
4315 
4316 	/* enable / disable APM wake up, no RMW needed */
4317 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4318 
4319 	/* set magic packet filter enabled */
4320 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4321 }
4322 
4323 /**
4324  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4325  * @pf: pointer to the PF struct
4326  *
4327  * Issue firmware command to enable multicast magic wake, making
4328  * sure that any locally administered address (LAA) is used for
4329  * wake, and that PF reset doesn't undo the LAA.
4330  */
4331 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4332 {
4333 	struct device *dev = ice_pf_to_dev(pf);
4334 	struct ice_hw *hw = &pf->hw;
4335 	enum ice_status status;
4336 	u8 mac_addr[ETH_ALEN];
4337 	struct ice_vsi *vsi;
4338 	u8 flags;
4339 
4340 	if (!pf->wol_ena)
4341 		return;
4342 
4343 	vsi = ice_get_main_vsi(pf);
4344 	if (!vsi)
4345 		return;
4346 
4347 	/* Get current MAC address in case it's an LAA */
4348 	if (vsi->netdev)
4349 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4350 	else
4351 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4352 
4353 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4354 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4355 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4356 
4357 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4358 	if (status)
4359 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4360 			ice_stat_str(status),
4361 			ice_aq_str(hw->adminq.sq_last_status));
4362 }
4363 
4364 /**
4365  * ice_remove - Device removal routine
4366  * @pdev: PCI device information struct
4367  */
4368 static void ice_remove(struct pci_dev *pdev)
4369 {
4370 	struct ice_pf *pf = pci_get_drvdata(pdev);
4371 	int i;
4372 
4373 	if (!pf)
4374 		return;
4375 
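	/* give any in-progress reset up to ICE_MAX_RESET_WAIT * 100 ms to
	 * finish before tearing the device down
	 */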
4376 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4377 		if (!ice_is_reset_in_progress(pf->state))
4378 			break;
4379 		msleep(100);
4380 	}
4381 
4382 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4383 		set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
4384 		ice_free_vfs(pf);
4385 	}
4386 
4387 	set_bit(__ICE_DOWN, pf->state);
4388 	ice_service_task_stop(pf);
4389 
4390 	ice_aq_cancel_waiting_tasks(pf);
4391 
4392 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4393 	ice_deinit_lag(pf);
4394 	if (!ice_is_safe_mode(pf))
4395 		ice_remove_arfs(pf);
4396 	ice_setup_mc_magic_wake(pf);
4397 	ice_vsi_release_all(pf);
4398 	ice_set_wake(pf);
4399 	ice_free_irq_msix_misc(pf);
4400 	ice_for_each_vsi(pf, i) {
4401 		if (!pf->vsi[i])
4402 			continue;
4403 		ice_vsi_free_q_vectors(pf->vsi[i]);
4404 	}
4405 	ice_deinit_pf(pf);
4406 	ice_devlink_destroy_regions(pf);
4407 	ice_deinit_hw(&pf->hw);
4408 	ice_devlink_unregister(pf);
4409 
4410 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4411 	 * do it via ice_schedule_reset() since there is no need to rebuild
4412 	 * and the service task is already stopped.
4413 	 */
4414 	ice_reset(&pf->hw, ICE_RESET_PFR);
4415 	pci_wait_for_pending_transaction(pdev);
4416 	ice_clear_interrupt_scheme(pf);
4417 	pci_disable_pcie_error_reporting(pdev);
4418 	pci_disable_device(pdev);
4419 }
4420 
4421 /**
4422  * ice_shutdown - PCI callback for shutting down device
4423  * @pdev: PCI device information struct
4424  */
4425 static void ice_shutdown(struct pci_dev *pdev)
4426 {
4427 	struct ice_pf *pf = pci_get_drvdata(pdev);
4428 
4429 	ice_remove(pdev);
4430 
4431 	if (system_state == SYSTEM_POWER_OFF) {
4432 		pci_wake_from_d3(pdev, pf->wol_ena);
4433 		pci_set_power_state(pdev, PCI_D3hot);
4434 	}
4435 }
4436 
4437 #ifdef CONFIG_PM
4438 /**
4439  * ice_prepare_for_shutdown - prep for PCI shutdown
4440  * @pf: board private structure
4441  *
4442  * Inform or close all dependent features in prep for PCI device shutdown
4443  */
4444 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4445 {
4446 	struct ice_hw *hw = &pf->hw;
4447 	u32 v;
4448 
4449 	/* Notify VFs of impending reset */
4450 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4451 		ice_vc_notify_reset(pf);
4452 
4453 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4454 
4455 	/* disable the VSIs and their queues that are not already DOWN */
4456 	ice_pf_dis_all_vsi(pf, false);
4457 
4458 	ice_for_each_vsi(pf, v)
4459 		if (pf->vsi[v])
4460 			pf->vsi[v]->vsi_num = 0;
4461 
4462 	ice_shutdown_all_ctrlq(hw);
4463 }
4464 
4465 /**
4466  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4467  * @pf: board private structure to reinitialize
4468  *
4469  * This routine reinitializes the interrupt scheme that was cleared
4470  * during the power management suspend callback.
4471  *
4472  * This should be called during the resume routine to re-allocate the
4473  * q_vectors and reacquire interrupts.
4474  */
4475 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4476 {
4477 	struct device *dev = ice_pf_to_dev(pf);
4478 	int ret, v;
4479 
4480 	/* MSI-X was disabled when the interrupt scheme was cleared during
4481 	 * suspend, so it must be re-enabled and the vectors reacquired here
4482 	 */
4483 
4484 	ret = ice_init_interrupt_scheme(pf);
4485 	if (ret) {
4486 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4487 		return ret;
4488 	}
4489 
4490 	/* Remap vectors and rings after interrupts are successfully re-initialized */
4491 	ice_for_each_vsi(pf, v) {
4492 		if (!pf->vsi[v])
4493 			continue;
4494 
4495 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4496 		if (ret)
4497 			goto err_reinit;
4498 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4499 	}
4500 
4501 	ret = ice_req_irq_msix_misc(pf);
4502 	if (ret) {
4503 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4504 			ret);
4505 		goto err_reinit;
4506 	}
4507 
4508 	return 0;
4509 
4510 err_reinit:
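	/* unwind the q_vectors allocated so far, in reverse VSI order */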
4511 	while (v--)
4512 		if (pf->vsi[v])
4513 			ice_vsi_free_q_vectors(pf->vsi[v]);
4514 
4515 	return ret;
4516 }
4517 
4518 /**
4519  * ice_suspend
4520  * @dev: generic device information structure
4521  *
4522  * Power Management callback to quiesce the device and prepare
4523  * for D3 transition.
4524  */
4525 static int __maybe_unused ice_suspend(struct device *dev)
4526 {
4527 	struct pci_dev *pdev = to_pci_dev(dev);
4528 	struct ice_pf *pf;
4529 	int disabled, v;
4530 
4531 	pf = pci_get_drvdata(pdev);
4532 
4533 	if (!ice_pf_state_is_nominal(pf)) {
4534 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4535 		return -EBUSY;
4536 	}
4537 
4538 	/* Stop watchdog tasks until resume completion.
4539 	 * Even though it is most likely that the service task is
4540 	 * disabled if the device is suspended or down, the service task's
4541 	 * state is controlled by a different state bit, and we should
4542 	 * store and honor whatever state that bit is in at this point.
4543 	 */
4544 	disabled = ice_service_task_stop(pf);
4545 
4546 	/* If already suspended, there is nothing to do */
4547 	if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
4548 		if (!disabled)
4549 			ice_service_task_restart(pf);
4550 		return 0;
4551 	}
4552 
4553 	if (test_bit(__ICE_DOWN, pf->state) ||
4554 	    ice_is_reset_in_progress(pf->state)) {
4555 		dev_err(dev, "can't suspend device in reset or already down\n");
4556 		if (!disabled)
4557 			ice_service_task_restart(pf);
4558 		return 0;
4559 	}
4560 
4561 	ice_setup_mc_magic_wake(pf);
4562 
4563 	ice_prepare_for_shutdown(pf);
4564 
4565 	ice_set_wake(pf);
4566 
4567 	/* Free vectors, clear the interrupt scheme and release IRQs
4568 	 * for proper hibernation, especially with large number of CPUs.
4569 	 * Otherwise hibernation might fail when mapping all the vectors back
4570 	 * to CPU0.
4571 	 */
4572 	ice_free_irq_msix_misc(pf);
4573 	ice_for_each_vsi(pf, v) {
4574 		if (!pf->vsi[v])
4575 			continue;
4576 		ice_vsi_free_q_vectors(pf->vsi[v]);
4577 	}
4578 	ice_clear_interrupt_scheme(pf);
4579 
4580 	pci_save_state(pdev);
4581 	pci_wake_from_d3(pdev, pf->wol_ena);
4582 	pci_set_power_state(pdev, PCI_D3hot);
4583 	return 0;
4584 }
4585 
4586 /**
4587  * ice_resume - PM callback for waking up from D3
4588  * @dev: generic device information structure
4589  */
4590 static int __maybe_unused ice_resume(struct device *dev)
4591 {
4592 	struct pci_dev *pdev = to_pci_dev(dev);
4593 	enum ice_reset_req reset_type;
4594 	struct ice_pf *pf;
4595 	struct ice_hw *hw;
4596 	int ret;
4597 
4598 	pci_set_power_state(pdev, PCI_D0);
4599 	pci_restore_state(pdev);
4600 	pci_save_state(pdev);
4601 
4602 	if (!pci_device_is_present(pdev))
4603 		return -ENODEV;
4604 
4605 	ret = pci_enable_device_mem(pdev);
4606 	if (ret) {
4607 		dev_err(dev, "Cannot enable device after suspend\n");
4608 		return ret;
4609 	}
4610 
4611 	pf = pci_get_drvdata(pdev);
4612 	hw = &pf->hw;
4613 
4614 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4615 	ice_print_wake_reason(pf);
4616 
4617 	/* We cleared the interrupt scheme when we suspended, so we need to
4618 	 * restore it now to resume device functionality.
4619 	 */
4620 	ret = ice_reinit_interrupt_scheme(pf);
4621 	if (ret)
4622 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4623 
4624 	clear_bit(__ICE_DOWN, pf->state);
4625 	/* Now perform PF reset and rebuild */
4626 	reset_type = ICE_RESET_PFR;
4627 	/* re-enable service task for reset, but allow reset to schedule it */
4628 	clear_bit(__ICE_SERVICE_DIS, pf->state);
4629 
4630 	if (ice_schedule_reset(pf, reset_type))
4631 		dev_err(dev, "Reset during resume failed.\n");
4632 
4633 	clear_bit(__ICE_SUSPENDED, pf->state);
4634 	ice_service_task_restart(pf);
4635 
4636 	/* Restart the service task */
4637 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4638 
4639 	return 0;
4640 }
4641 #endif /* CONFIG_PM */
4642 
4643 /**
4644  * ice_pci_err_detected - warning that PCI error has been detected
4645  * @pdev: PCI device information struct
4646  * @err: the type of PCI error
4647  *
4648  * Called to warn that something happened on the PCI bus and the error handling
4649  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4650  */
4651 static pci_ers_result_t
4652 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4653 {
4654 	struct ice_pf *pf = pci_get_drvdata(pdev);
4655 
4656 	if (!pf) {
4657 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4658 			__func__, err);
4659 		return PCI_ERS_RESULT_DISCONNECT;
4660 	}
4661 
4662 	if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4663 		ice_service_task_stop(pf);
4664 
4665 		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4666 			set_bit(__ICE_PFR_REQ, pf->state);
4667 			ice_prepare_for_reset(pf);
4668 		}
4669 	}
4670 
4671 	return PCI_ERS_RESULT_NEED_RESET;
4672 }
4673 
4674 /**
4675  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4676  * @pdev: PCI device information struct
4677  *
4678  * Called to determine if the driver can recover from the PCI slot reset by
4679  * using a register read to determine if the device is recoverable.
4680  */
4681 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4682 {
4683 	struct ice_pf *pf = pci_get_drvdata(pdev);
4684 	pci_ers_result_t result;
4685 	int err;
4686 	u32 reg;
4687 
4688 	err = pci_enable_device_mem(pdev);
4689 	if (err) {
4690 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4691 			err);
4692 		result = PCI_ERS_RESULT_DISCONNECT;
4693 	} else {
4694 		pci_set_master(pdev);
4695 		pci_restore_state(pdev);
4696 		pci_save_state(pdev);
4697 		pci_wake_from_d3(pdev, false);
4698 
4699 		/* Check for life */
4700 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4701 		if (!reg)
4702 			result = PCI_ERS_RESULT_RECOVERED;
4703 		else
4704 			result = PCI_ERS_RESULT_DISCONNECT;
4705 	}
4706 
4707 	err = pci_aer_clear_nonfatal_status(pdev);
4708 	if (err)
4709 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4710 			err);
4711 	/* non-fatal, continue */
4712 
4713 	return result;
4714 }
4715 
4716 /**
4717  * ice_pci_err_resume - restart operations after PCI error recovery
4718  * @pdev: PCI device information struct
4719  *
4720  * Called to allow the driver to bring things back up after PCI error and/or
4721  * reset recovery have finished
4722  */
4723 static void ice_pci_err_resume(struct pci_dev *pdev)
4724 {
4725 	struct ice_pf *pf = pci_get_drvdata(pdev);
4726 
4727 	if (!pf) {
4728 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4729 			__func__);
4730 		return;
4731 	}
4732 
4733 	if (test_bit(__ICE_SUSPENDED, pf->state)) {
4734 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4735 			__func__);
4736 		return;
4737 	}
4738 
4739 	ice_restore_all_vfs_msi_state(pdev);
4740 
4741 	ice_do_reset(pf, ICE_RESET_PFR);
4742 	ice_service_task_restart(pf);
4743 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4744 }
4745 
4746 /**
4747  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4748  * @pdev: PCI device information struct
4749  */
4750 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4751 {
4752 	struct ice_pf *pf = pci_get_drvdata(pdev);
4753 
4754 	if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4755 		ice_service_task_stop(pf);
4756 
4757 		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4758 			set_bit(__ICE_PFR_REQ, pf->state);
4759 			ice_prepare_for_reset(pf);
4760 		}
4761 	}
4762 }
4763 
4764 /**
4765  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
4766  * @pdev: PCI device information struct
4767  */
4768 static void ice_pci_err_reset_done(struct pci_dev *pdev)
4769 {
4770 	ice_pci_err_resume(pdev);
4771 }
4772 
4773 /* ice_pci_tbl - PCI Device ID Table
4774  *
4775  * Wildcard entries (PCI_ANY_ID) should come last
4776  * Last entry must be all 0s
4777  *
4778  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
4779  *   Class, Class Mask, private data (not used) }
4780  */
4781 static const struct pci_device_id ice_pci_tbl[] = {
4782 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
4783 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
4784 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
4785 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
4786 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
4787 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
4788 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
4789 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
4790 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
4791 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
4792 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
4793 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
4794 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
4795 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
4796 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
4797 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
4798 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
4799 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
4800 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
4801 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
4802 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
4803 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
4804 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
4805 	/* required last entry */
4806 	{ 0, }
4807 };
4808 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
4809 
4810 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
4811 
4812 static const struct pci_error_handlers ice_pci_err_handler = {
4813 	.error_detected = ice_pci_err_detected,
4814 	.slot_reset = ice_pci_err_slot_reset,
4815 	.reset_prepare = ice_pci_err_reset_prepare,
4816 	.reset_done = ice_pci_err_reset_done,
4817 	.resume = ice_pci_err_resume
4818 };
4819 
4820 static struct pci_driver ice_driver = {
4821 	.name = KBUILD_MODNAME,
4822 	.id_table = ice_pci_tbl,
4823 	.probe = ice_probe,
4824 	.remove = ice_remove,
4825 #ifdef CONFIG_PM
4826 	.driver.pm = &ice_pm_ops,
4827 #endif /* CONFIG_PM */
4828 	.shutdown = ice_shutdown,
4829 	.sriov_configure = ice_sriov_configure,
4830 	.err_handler = &ice_pci_err_handler
4831 };
4832 
4833 /**
4834  * ice_module_init - Driver registration routine
4835  *
4836  * ice_module_init is the first routine called when the driver is
4837  * loaded. All it does is register with the PCI subsystem.
4838  */
4839 static int __init ice_module_init(void)
4840 {
4841 	int status;
4842 
4843 	pr_info("%s\n", ice_driver_string);
4844 	pr_info("%s\n", ice_copyright);
4845 
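	/* the "%s" format plus the KBUILD_MODNAME argument names the
	 * workqueue after the module, i.e. "ice"
	 */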
4846 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
4847 	if (!ice_wq) {
4848 		pr_err("Failed to create workqueue\n");
4849 		return -ENOMEM;
4850 	}
4851 
4852 	status = pci_register_driver(&ice_driver);
4853 	if (status) {
4854 		pr_err("failed to register PCI driver, err %d\n", status);
4855 		destroy_workqueue(ice_wq);
4856 	}
4857 
4858 	return status;
4859 }
4860 module_init(ice_module_init);
4861 
4862 /**
4863  * ice_module_exit - Driver exit cleanup routine
4864  *
4865  * ice_module_exit is called just before the driver is removed
4866  * from memory.
4867  */
4868 static void __exit ice_module_exit(void)
4869 {
4870 	pci_unregister_driver(&ice_driver);
4871 	destroy_workqueue(ice_wq);
4872 	pr_info("module unloaded\n");
4873 }
4874 module_exit(ice_module_exit);
4875 
4876 /**
4877  * ice_set_mac_address - NDO callback to set MAC address
4878  * @netdev: network interface device structure
4879  * @pi: pointer to an address structure
4880  *
4881  * Returns 0 on success, negative on failure
4882  */
4883 static int ice_set_mac_address(struct net_device *netdev, void *pi)
4884 {
4885 	struct ice_netdev_priv *np = netdev_priv(netdev);
4886 	struct ice_vsi *vsi = np->vsi;
4887 	struct ice_pf *pf = vsi->back;
4888 	struct ice_hw *hw = &pf->hw;
4889 	struct sockaddr *addr = pi;
4890 	enum ice_status status;
4891 	u8 flags = 0;
4892 	int err = 0;
4893 	u8 *mac;
4894 
4895 	mac = (u8 *)addr->sa_data;
4896 
4897 	if (!is_valid_ether_addr(mac))
4898 		return -EADDRNOTAVAIL;
4899 
4900 	if (ether_addr_equal(netdev->dev_addr, mac)) {
4901 		netdev_warn(netdev, "already using mac %pM\n", mac);
4902 		return 0;
4903 	}
4904 
4905 	if (test_bit(__ICE_DOWN, pf->state) ||
4906 	    ice_is_reset_in_progress(pf->state)) {
4907 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
4908 			   mac);
4909 		return -EBUSY;
4910 	}
4911 
4912 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
4913 	status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
4914 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
4915 		err = -EADDRNOTAVAIL;
4916 		goto err_update_filters;
4917 	}
4918 
4919 	/* Add filter for new MAC. If filter exists, return success */
4920 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
4921 	if (status == ICE_ERR_ALREADY_EXISTS) {
4922 		/* Although this MAC filter is already present in hardware it's
4923 		 * possible in some cases (e.g. bonding) that dev_addr was
4924 		 * modified outside of the driver and needs to be restored back
4925 		 * to this value.
4926 		 */
4927 		memcpy(netdev->dev_addr, mac, netdev->addr_len);
4928 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
4929 		return 0;
4930 	}
4931 
4932 	/* error if the new filter addition failed */
4933 	if (status)
4934 		err = -EADDRNOTAVAIL;
4935 
4936 err_update_filters:
4937 	if (err) {
4938 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
4939 			   mac);
4940 		return err;
4941 	}
4942 
4943 	/* change the netdev's MAC address */
4944 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
4945 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
4946 		   netdev->dev_addr);
4947 
4948 	/* write new MAC address to the firmware */
4949 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4950 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
4951 	if (status) {
4952 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
4953 			   mac, ice_stat_str(status));
4954 	}
4955 	return 0;
4956 }
4957 
4958 /**
4959  * ice_set_rx_mode - NDO callback to set the netdev filters
4960  * @netdev: network interface device structure
4961  */
4962 static void ice_set_rx_mode(struct net_device *netdev)
4963 {
4964 	struct ice_netdev_priv *np = netdev_priv(netdev);
4965 	struct ice_vsi *vsi = np->vsi;
4966 
4967 	if (!vsi)
4968 		return;
4969 
4970 	/* Set the flags to synchronize filters; ndo_set_rx_mode may be
4971 	 * triggered even without a change in netdev flags
4972 	 */
4974 	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
4975 	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
4976 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
4977 
4978 	/* schedule our worker thread which will take care of
4979 	 * applying the new filter changes
4980 	 */
4981 	ice_service_task_schedule(vsi->back);
4982 }
4983 
4984 /**
4985  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
4986  * @netdev: network interface device structure
4987  * @queue_index: Queue ID
4988  * @maxrate: maximum bandwidth in Mbps
4989  */
4990 static int
4991 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
4992 {
4993 	struct ice_netdev_priv *np = netdev_priv(netdev);
4994 	struct ice_vsi *vsi = np->vsi;
4995 	enum ice_status status;
4996 	u16 q_handle;
4997 	u8 tc;
4998 
4999 	/* Validate maxrate requested is within permitted range */
5000 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5001 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5002 			   maxrate, queue_index);
5003 		return -EINVAL;
5004 	}
5005 
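	/* the scheduler programs rates in Kbps, hence the Mbps conversions
	 * above (ICE_SCHED_MAX_BW / 1000) and below (maxrate * 1000)
	 */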
5006 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5007 	tc = ice_dcb_get_tc(vsi, queue_index);
5008 
5009 	/* Set BW back to default, when user set maxrate to 0 */
5010 	/* Set BW back to default when the user sets maxrate to 0 */
5011 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5012 					       q_handle, ICE_MAX_BW);
5013 	else
5014 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5015 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5016 	if (status) {
5017 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5018 			   ice_stat_str(status));
5019 		return -EIO;
5020 	}
5021 
5022 	return 0;
5023 }
5024 
5025 /**
5026  * ice_fdb_add - add an entry to the hardware database
5027  * @ndm: the input from the stack
5028  * @tb: pointer to array of nladdr (unused)
5029  * @dev: the net device pointer
5030  * @addr: the MAC address entry being added
5031  * @vid: VLAN ID
5032  * @flags: instructions from stack about fdb operation
5033  * @extack: netlink extended ack
5034  */
5035 static int
5036 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5037 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5038 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5039 {
5040 	int err;
5041 
5042 	if (vid) {
5043 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5044 		return -EINVAL;
5045 	}
5046 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5047 		netdev_err(dev, "FDB only supports static addresses\n");
5048 		return -EINVAL;
5049 	}
5050 
5051 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5052 		err = dev_uc_add_excl(dev, addr);
5053 	else if (is_multicast_ether_addr(addr))
5054 		err = dev_mc_add_excl(dev, addr);
5055 	else
5056 		err = -EINVAL;
5057 
5058 	/* Only return duplicate errors if NLM_F_EXCL is set */
5059 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5060 		err = 0;
5061 
5062 	return err;
5063 }
5064 
5065 /**
5066  * ice_fdb_del - delete an entry from the hardware database
5067  * @ndm: the input from the stack
5068  * @tb: pointer to array of nladdr (unused)
5069  * @dev: the net device pointer
5070  * @addr: the MAC address entry being removed
5071  * @vid: VLAN ID
5072  */
5073 static int
5074 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5075 	    struct net_device *dev, const unsigned char *addr,
5076 	    __always_unused u16 vid)
5077 {
5078 	int err;
5079 
5080 	if (ndm->ndm_state & NUD_PERMANENT) {
5081 		netdev_err(dev, "FDB only supports static addresses\n");
5082 		return -EINVAL;
5083 	}
5084 
5085 	if (is_unicast_ether_addr(addr))
5086 		err = dev_uc_del(dev, addr);
5087 	else if (is_multicast_ether_addr(addr))
5088 		err = dev_mc_del(dev, addr);
5089 	else
5090 		err = -EINVAL;
5091 
5092 	return err;
5093 }
5094 
5095 /**
5096  * ice_set_features - set the netdev feature flags
5097  * @netdev: ptr to the netdev being adjusted
5098  * @features: the feature set that the stack is suggesting
5099  */
5100 static int
5101 ice_set_features(struct net_device *netdev, netdev_features_t features)
5102 {
5103 	struct ice_netdev_priv *np = netdev_priv(netdev);
5104 	struct ice_vsi *vsi = np->vsi;
5105 	struct ice_pf *pf = vsi->back;
5106 	int ret = 0;
5107 
5108 	/* Don't set any netdev advanced features with device in Safe Mode */
5109 	if (ice_is_safe_mode(vsi->back)) {
5110 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5111 		return ret;
5112 	}
5113 
5114 	/* Do not change settings during reset */
5115 	if (ice_is_reset_in_progress(pf->state)) {
5116 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features is temporarily unavailable.\n");
5117 		return -EBUSY;
5118 	}
5119 
5120 	/* Multiple features can be changed in one call so keep features in
5121 	 * separate if/else statements to guarantee each feature is checked
5122 	 */
5123 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5124 		ret = ice_vsi_manage_rss_lut(vsi, true);
5125 	else if (!(features & NETIF_F_RXHASH) &&
5126 		 netdev->features & NETIF_F_RXHASH)
5127 		ret = ice_vsi_manage_rss_lut(vsi, false);
5128 
5129 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5130 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5131 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5132 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5133 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5134 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5135 
5136 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5137 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5138 		ret = ice_vsi_manage_vlan_insertion(vsi);
5139 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5140 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5141 		ret = ice_vsi_manage_vlan_insertion(vsi);
5142 
5143 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5144 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5145 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5146 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5147 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5148 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5149 
5150 	if ((features & NETIF_F_NTUPLE) &&
5151 	    !(netdev->features & NETIF_F_NTUPLE)) {
5152 		ice_vsi_manage_fdir(vsi, true);
5153 		ice_init_arfs(vsi);
5154 	} else if (!(features & NETIF_F_NTUPLE) &&
5155 		 (netdev->features & NETIF_F_NTUPLE)) {
5156 		ice_vsi_manage_fdir(vsi, false);
5157 		ice_clear_arfs(vsi);
5158 	}
5159 
5160 	return ret;
5161 }
5162 
5163 /**
5164  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5165  * @vsi: VSI to setup VLAN properties for
5166  */
5167 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5168 {
5169 	int ret = 0;
5170 
5171 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5172 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5173 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5174 		ret = ice_vsi_manage_vlan_insertion(vsi);
5175 
5176 	return ret;
5177 }
5178 
5179 /**
5180  * ice_vsi_cfg - Setup the VSI
5181  * @vsi: the VSI being configured
5182  *
5183  * Return 0 on success and negative value on error
5184  */
5185 int ice_vsi_cfg(struct ice_vsi *vsi)
5186 {
5187 	int err;
5188 
5189 	if (vsi->netdev) {
5190 		ice_set_rx_mode(vsi->netdev);
5191 
5192 		err = ice_vsi_vlan_setup(vsi);
5193 
5194 		if (err)
5195 			return err;
5196 	}
5197 	ice_vsi_cfg_dcb_rings(vsi);
5198 
5199 	err = ice_vsi_cfg_lan_txqs(vsi);
5200 	if (!err && ice_is_xdp_ena_vsi(vsi))
5201 		err = ice_vsi_cfg_xdp_txqs(vsi);
5202 	if (!err)
5203 		err = ice_vsi_cfg_rxqs(vsi);
5204 
5205 	return err;
5206 }
5207 
5208 /**
5209  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5210  * @vsi: the VSI being configured
5211  */
5212 static void ice_napi_enable_all(struct ice_vsi *vsi)
5213 {
5214 	int q_idx;
5215 
5216 	if (!vsi->netdev)
5217 		return;
5218 
5219 	ice_for_each_q_vector(vsi, q_idx) {
5220 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5221 
5222 		if (q_vector->rx.ring || q_vector->tx.ring)
5223 			napi_enable(&q_vector->napi);
5224 	}
5225 }
5226 
5227 /**
5228  * ice_up_complete - Finish the last steps of bringing up a connection
5229  * @vsi: The VSI being configured
5230  *
5231  * Return 0 on success and negative value on error
5232  */
5233 static int ice_up_complete(struct ice_vsi *vsi)
5234 {
5235 	struct ice_pf *pf = vsi->back;
5236 	int err;
5237 
5238 	ice_vsi_cfg_msix(vsi);
5239 
5240 	/* Enable only Rx rings; Tx rings were enabled by the FW when the
5241 	 * Tx queue group list was configured and the context bits were
5242 	 * programmed using ice_vsi_cfg_txqs
5243 	 */
5244 	err = ice_vsi_start_all_rx_rings(vsi);
5245 	if (err)
5246 		return err;
5247 
5248 	clear_bit(__ICE_DOWN, vsi->state);
5249 	ice_napi_enable_all(vsi);
5250 	ice_vsi_ena_irq(vsi);
5251 
5252 	if (vsi->port_info &&
5253 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5254 	    vsi->netdev) {
5255 		ice_print_link_msg(vsi, true);
5256 		netif_tx_start_all_queues(vsi->netdev);
5257 		netif_carrier_on(vsi->netdev);
5258 	}
5259 
5260 	ice_service_task_schedule(pf);
5261 
5262 	return 0;
5263 }
5264 
5265 /**
5266  * ice_up - Bring the connection back up after being down
5267  * @vsi: VSI being configured
5268  */
5269 int ice_up(struct ice_vsi *vsi)
5270 {
5271 	int err;
5272 
5273 	err = ice_vsi_cfg(vsi);
5274 	if (!err)
5275 		err = ice_up_complete(vsi);
5276 
5277 	return err;
5278 }
5279 
5280 /**
5281  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5282  * @ring: Tx or Rx ring to read stats from
5283  * @pkts: packets stats counter
5284  * @bytes: bytes stats counter
5285  *
5286  * This function fetches stats from the ring, using the atomic operations
5287  * needed to read u64 values consistently on 32-bit machines.
5288  */
5289 static void
5290 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5291 {
5292 	unsigned int start;
5293 	*pkts = 0;
5294 	*bytes = 0;
5295 
5296 	if (!ring)
5297 		return;
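	/* u64_stats seqcount retry loop: re-read until no writer updated the
	 * counters mid-read, so the 64-bit values are consistent even on
	 * 32-bit machines
	 */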
5298 	do {
5299 		start = u64_stats_fetch_begin_irq(&ring->syncp);
5300 		*pkts = ring->stats.pkts;
5301 		*bytes = ring->stats.bytes;
5302 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5303 }
5304 
5305 /**
5306  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5307  * @vsi: the VSI to be updated
5308  * @rings: rings to work on
5309  * @count: number of rings
5310  */
5311 static void
5312 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5313 			     u16 count)
5314 {
5315 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5316 	u16 i;
5317 
5318 	for (i = 0; i < count; i++) {
5319 		struct ice_ring *ring;
5320 		u64 pkts, bytes;
5321 
5322 		ring = READ_ONCE(rings[i]);
		/* ring can be NULL while queues are being reconfigured; skip
		 * it rather than dereferencing its tx_stats below
		 */
		if (!ring)
			continue;
5323 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5324 		vsi_stats->tx_packets += pkts;
5325 		vsi_stats->tx_bytes += bytes;
5326 		vsi->tx_restart += ring->tx_stats.restart_q;
5327 		vsi->tx_busy += ring->tx_stats.tx_busy;
5328 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5329 	}
5330 }
5331 
5332 /**
5333  * ice_update_vsi_ring_stats - Update VSI stats counters
5334  * @vsi: the VSI to be updated
5335  */
5336 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5337 {
5338 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5339 	struct ice_ring *ring;
5340 	u64 pkts, bytes;
5341 	int i;
5342 
5343 	/* reset netdev stats */
5344 	vsi_stats->tx_packets = 0;
5345 	vsi_stats->tx_bytes = 0;
5346 	vsi_stats->rx_packets = 0;
5347 	vsi_stats->rx_bytes = 0;
5348 
5349 	/* reset non-netdev (extended) stats */
5350 	vsi->tx_restart = 0;
5351 	vsi->tx_busy = 0;
5352 	vsi->tx_linearize = 0;
5353 	vsi->rx_buf_failed = 0;
5354 	vsi->rx_page_failed = 0;
5355 	vsi->rx_gro_dropped = 0;
5356 
5357 	rcu_read_lock();
5358 
5359 	/* update Tx rings counters */
5360 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5361 
5362 	/* update Rx rings counters */
5363 	ice_for_each_rxq(vsi, i) {
5364 		ring = READ_ONCE(vsi->rx_rings[i]);
5365 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5366 		vsi_stats->rx_packets += pkts;
5367 		vsi_stats->rx_bytes += bytes;
5368 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5369 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5370 		vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
5371 	}
5372 
5373 	/* update XDP Tx rings counters */
5374 	if (ice_is_xdp_ena_vsi(vsi))
5375 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5376 					     vsi->num_xdp_txq);
5377 
5378 	rcu_read_unlock();
5379 }
5380 
5381 /**
5382  * ice_update_vsi_stats - Update VSI stats counters
5383  * @vsi: the VSI to be updated
5384  */
5385 void ice_update_vsi_stats(struct ice_vsi *vsi)
5386 {
5387 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5388 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5389 	struct ice_pf *pf = vsi->back;
5390 
5391 	if (test_bit(__ICE_DOWN, vsi->state) ||
5392 	    test_bit(__ICE_CFG_BUSY, pf->state))
5393 		return;
5394 
5395 	/* get stats as recorded by Tx/Rx rings */
5396 	ice_update_vsi_ring_stats(vsi);
5397 
5398 	/* get VSI stats as recorded by the hardware */
5399 	ice_update_eth_stats(vsi);
5400 
5401 	cur_ns->tx_errors = cur_es->tx_errors;
5402 	cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
5403 	cur_ns->tx_dropped = cur_es->tx_discards;
5404 	cur_ns->multicast = cur_es->rx_multicast;
5405 
5406 	/* update some more netdev stats if this is main VSI */
5407 	if (vsi->type == ICE_VSI_PF) {
5408 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5409 		cur_ns->rx_errors = pf->stats.crc_errors +
5410 				    pf->stats.illegal_bytes +
5411 				    pf->stats.rx_len_errors +
5412 				    pf->stats.rx_undersize +
5413 				    pf->hw_csum_rx_error +
5414 				    pf->stats.rx_jabber +
5415 				    pf->stats.rx_fragments +
5416 				    pf->stats.rx_oversize;
5417 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5418 		/* record drops from the port level */
5419 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5420 	}
5421 }
5422 
5423 /**
5424  * ice_update_pf_stats - Update PF port stats counters
5425  * @pf: PF whose stats needs to be updated
5426  * @pf: PF whose stats need to be updated
5427 void ice_update_pf_stats(struct ice_pf *pf)
5428 {
5429 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5430 	struct ice_hw *hw = &pf->hw;
5431 	u16 fd_ctr_base;
5432 	u8 port;
5433 
5434 	port = hw->port_info->lport;
5435 	prev_ps = &pf->stats_prev;
5436 	cur_ps = &pf->stats;
5437 
5438 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5439 			  &prev_ps->eth.rx_bytes,
5440 			  &cur_ps->eth.rx_bytes);
5441 
5442 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5443 			  &prev_ps->eth.rx_unicast,
5444 			  &cur_ps->eth.rx_unicast);
5445 
5446 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5447 			  &prev_ps->eth.rx_multicast,
5448 			  &cur_ps->eth.rx_multicast);
5449 
5450 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5451 			  &prev_ps->eth.rx_broadcast,
5452 			  &cur_ps->eth.rx_broadcast);
5453 
5454 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5455 			  &prev_ps->eth.rx_discards,
5456 			  &cur_ps->eth.rx_discards);
5457 
5458 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5459 			  &prev_ps->eth.tx_bytes,
5460 			  &cur_ps->eth.tx_bytes);
5461 
5462 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5463 			  &prev_ps->eth.tx_unicast,
5464 			  &cur_ps->eth.tx_unicast);
5465 
5466 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5467 			  &prev_ps->eth.tx_multicast,
5468 			  &cur_ps->eth.tx_multicast);
5469 
5470 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5471 			  &prev_ps->eth.tx_broadcast,
5472 			  &cur_ps->eth.tx_broadcast);
5473 
5474 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5475 			  &prev_ps->tx_dropped_link_down,
5476 			  &cur_ps->tx_dropped_link_down);
5477 
5478 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5479 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5480 
5481 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5482 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5483 
5484 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5485 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5486 
5487 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5488 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5489 
5490 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5491 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5492 
5493 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5494 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5495 
5496 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5497 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5498 
5499 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5500 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5501 
5502 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5503 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5504 
5505 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5506 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5507 
5508 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5509 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5510 
5511 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5512 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5513 
5514 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5515 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5516 
5517 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5518 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5519 
5520 	fd_ctr_base = hw->fd_ctr_base;
5521 
5522 	ice_stat_update40(hw,
5523 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5524 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5525 			  &cur_ps->fd_sb_match);
5526 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5527 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5528 
5529 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5530 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5531 
5532 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5533 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5534 
5535 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5536 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5537 
5538 	ice_update_dcb_stats(pf);
5539 
5540 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5541 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5542 
5543 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5544 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5545 
5546 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5547 			  &prev_ps->mac_local_faults,
5548 			  &cur_ps->mac_local_faults);
5549 
5550 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5551 			  &prev_ps->mac_remote_faults,
5552 			  &cur_ps->mac_remote_faults);
5553 
5554 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5555 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5556 
5557 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5558 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5559 
5560 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5561 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5562 
5563 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5564 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5565 
5566 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5567 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5568 
5569 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5570 
5571 	pf->stat_prev_loaded = true;
5572 }
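
/* The ice_stat_update40/32() helpers above fold free-running, narrow HW
 * counters into monotonically increasing 64-bit SW counters. A minimal
 * sketch of the 40-bit rollover handling (the real helper lives in
 * ice_common.c):
 *
 *	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
 *
 *	if (!prev_stat_loaded)		// first read after reset
 *		*prev_stat = new_data;
 *	if (new_data >= *prev_stat)	// no wrap since the last read
 *		*cur_stat += new_data - *prev_stat;
 *	else				// counter wrapped past 2^40
 *		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
 *	*prev_stat = new_data;
 *
 * pf->stat_prev_loaded, set at the end of this function, is what arms the
 * delta math after the first refresh following a reset.
 */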
5573 
5574 /**
5575  * ice_get_stats64 - get statistics for network device structure
5576  * @netdev: network interface device structure
5577  * @stats: main device statistics structure
5578  */
5579 static
5580 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5581 {
5582 	struct ice_netdev_priv *np = netdev_priv(netdev);
5583 	struct rtnl_link_stats64 *vsi_stats;
5584 	struct ice_vsi *vsi = np->vsi;
5585 
5586 	vsi_stats = &vsi->net_stats;
5587 
5588 	if (!vsi->num_txq || !vsi->num_rxq)
5589 		return;
5590 
5591 	/* netdev packet/byte stats come from ring counters. These are obtained
5592 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
5593 	 * But only call the update routine and read the registers if VSI is
5594 	 * not down.
5595 	 */
5596 	if (!test_bit(__ICE_DOWN, vsi->state))
5597 		ice_update_vsi_ring_stats(vsi);
5598 	stats->tx_packets = vsi_stats->tx_packets;
5599 	stats->tx_bytes = vsi_stats->tx_bytes;
5600 	stats->rx_packets = vsi_stats->rx_packets;
5601 	stats->rx_bytes = vsi_stats->rx_bytes;
5602 
5603 	/* The rest of the stats can be read from the hardware but instead we
5604 	 * just return values that the watchdog task has already obtained from
5605 	 * the hardware.
5606 	 */
5607 	stats->multicast = vsi_stats->multicast;
5608 	stats->tx_errors = vsi_stats->tx_errors;
5609 	stats->tx_dropped = vsi_stats->tx_dropped;
5610 	stats->rx_errors = vsi_stats->rx_errors;
5611 	stats->rx_dropped = vsi_stats->rx_dropped;
5612 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5613 	stats->rx_length_errors = vsi_stats->rx_length_errors;
5614 }
5615 
5616 /**
5617  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5618  * @vsi: VSI having NAPI disabled
5619  */
5620 static void ice_napi_disable_all(struct ice_vsi *vsi)
5621 {
5622 	int q_idx;
5623 
5624 	if (!vsi->netdev)
5625 		return;
5626 
5627 	ice_for_each_q_vector(vsi, q_idx) {
5628 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5629 
5630 		if (q_vector->rx.ring || q_vector->tx.ring)
5631 			napi_disable(&q_vector->napi);
5632 	}
5633 }
5634 
5635 /**
5636  * ice_down - Shutdown the connection
5637  * @vsi: The VSI being stopped
5638  */
5639 int ice_down(struct ice_vsi *vsi)
5640 {
5641 	int i, tx_err, rx_err, link_err = 0;
5642 
5643 	/* Caller of this function is expected to set the
5644 	 * vsi->state __ICE_DOWN bit
5645 	 */
5646 	if (vsi->netdev) {
5647 		netif_carrier_off(vsi->netdev);
5648 		netif_tx_disable(vsi->netdev);
5649 	}
5650 
5651 	ice_vsi_dis_irq(vsi);
5652 
5653 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
5654 	if (tx_err)
5655 		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
5656 			   vsi->vsi_num, tx_err);
5657 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
5658 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
5659 		if (tx_err)
5660 			netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
5661 				   vsi->vsi_num, tx_err);
5662 	}
5663 
5664 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
5665 	if (rx_err)
5666 		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
5667 			   vsi->vsi_num, rx_err);
5668 
5669 	ice_napi_disable_all(vsi);
5670 
5671 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5672 		link_err = ice_force_phys_link_state(vsi, false);
5673 		if (link_err)
5674 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5675 				   vsi->vsi_num, link_err);
5676 	}
5677 
5678 	ice_for_each_txq(vsi, i)
5679 		ice_clean_tx_ring(vsi->tx_rings[i]);
5680 
5681 	ice_for_each_rxq(vsi, i)
5682 		ice_clean_rx_ring(vsi->rx_rings[i]);
5683 
5684 	if (tx_err || rx_err || link_err) {
5685 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5686 			   vsi->vsi_num, vsi->vsw->sw_id);
5687 		return -EIO;
5688 	}
5689 
5690 	return 0;
5691 }
5692 
5693 /**
5694  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
5695  * @vsi: VSI having resources allocated
5696  *
5697  * Return 0 on success, negative on failure
5698  */
5699 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
5700 {
5701 	int i, err = 0;
5702 
5703 	if (!vsi->num_txq) {
5704 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
5705 			vsi->vsi_num);
5706 		return -EINVAL;
5707 	}
5708 
5709 	ice_for_each_txq(vsi, i) {
5710 		struct ice_ring *ring = vsi->tx_rings[i];
5711 
5712 		if (!ring)
5713 			return -EINVAL;
5714 
5715 		ring->netdev = vsi->netdev;
5716 		err = ice_setup_tx_ring(ring);
5717 		if (err)
5718 			break;
5719 	}
5720 
5721 	return err;
5722 }
5723 
5724 /**
5725  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
5726  * @vsi: VSI having resources allocated
5727  *
5728  * Return 0 on success, negative on failure
5729  */
5730 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
5731 {
5732 	int i, err = 0;
5733 
5734 	if (!vsi->num_rxq) {
5735 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
5736 			vsi->vsi_num);
5737 		return -EINVAL;
5738 	}
5739 
5740 	ice_for_each_rxq(vsi, i) {
5741 		struct ice_ring *ring = vsi->rx_rings[i];
5742 
5743 		if (!ring)
5744 			return -EINVAL;
5745 
5746 		ring->netdev = vsi->netdev;
5747 		err = ice_setup_rx_ring(ring);
5748 		if (err)
5749 			break;
5750 	}
5751 
5752 	return err;
5753 }
5754 
5755 /**
5756  * ice_vsi_open_ctrl - open control VSI for use
5757  * @vsi: the VSI to open
5758  *
5759  * Initialization of the Control VSI
5760  *
5761  * Returns 0 on success, negative value on error
5762  */
5763 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
5764 {
5765 	char int_name[ICE_INT_NAME_STR_LEN];
5766 	struct ice_pf *pf = vsi->back;
5767 	struct device *dev;
5768 	int err;
5769 
5770 	dev = ice_pf_to_dev(pf);
5771 	/* allocate descriptors */
5772 	err = ice_vsi_setup_tx_rings(vsi);
5773 	if (err)
5774 		goto err_setup_tx;
5775 
5776 	err = ice_vsi_setup_rx_rings(vsi);
5777 	if (err)
5778 		goto err_setup_rx;
5779 
5780 	err = ice_vsi_cfg(vsi);
5781 	if (err)
5782 		goto err_setup_rx;
5783 
5784 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
5785 		 dev_driver_string(dev), dev_name(dev));
5786 	err = ice_vsi_req_irq_msix(vsi, int_name);
5787 	if (err)
5788 		goto err_setup_rx;
5789 
5790 	ice_vsi_cfg_msix(vsi);
5791 
5792 	err = ice_vsi_start_all_rx_rings(vsi);
5793 	if (err)
5794 		goto err_up_complete;
5795 
5796 	clear_bit(__ICE_DOWN, vsi->state);
5797 	ice_vsi_ena_irq(vsi);
5798 
5799 	return 0;
5800 
5801 err_up_complete:
5802 	ice_down(vsi);
5803 err_setup_rx:
5804 	ice_vsi_free_rx_rings(vsi);
5805 err_setup_tx:
5806 	ice_vsi_free_tx_rings(vsi);
5807 
5808 	return err;
5809 }
5810 
5811 /**
5812  * ice_vsi_open - Called when a network interface is made active
5813  * @vsi: the VSI to open
5814  *
5815  * Initialization of the VSI
5816  *
5817  * Returns 0 on success, negative value on error
5818  */
5819 static int ice_vsi_open(struct ice_vsi *vsi)
5820 {
5821 	char int_name[ICE_INT_NAME_STR_LEN];
5822 	struct ice_pf *pf = vsi->back;
5823 	int err;
5824 
5825 	/* allocate descriptors */
5826 	err = ice_vsi_setup_tx_rings(vsi);
5827 	if (err)
5828 		goto err_setup_tx;
5829 
5830 	err = ice_vsi_setup_rx_rings(vsi);
5831 	if (err)
5832 		goto err_setup_rx;
5833 
5834 	err = ice_vsi_cfg(vsi);
5835 	if (err)
5836 		goto err_setup_rx;
5837 
5838 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5839 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
5840 	err = ice_vsi_req_irq_msix(vsi, int_name);
5841 	if (err)
5842 		goto err_setup_rx;
5843 
5844 	/* Notify the stack of the actual queue counts. */
5845 	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
5846 	if (err)
5847 		goto err_set_qs;
5848 
5849 	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
5850 	if (err)
5851 		goto err_set_qs;
5852 
5853 	err = ice_up_complete(vsi);
5854 	if (err)
5855 		goto err_up_complete;
5856 
5857 	return 0;
5858 
5859 err_up_complete:
5860 	ice_down(vsi);
5861 err_set_qs:
5862 	ice_vsi_free_irq(vsi);
5863 err_setup_rx:
5864 	ice_vsi_free_rx_rings(vsi);
5865 err_setup_tx:
5866 	ice_vsi_free_tx_rings(vsi);
5867 
5868 	return err;
5869 }
5870 
5871 /**
5872  * ice_vsi_release_all - Delete all VSIs
5873  * @pf: PF from which all VSIs are being removed
5874  */
5875 static void ice_vsi_release_all(struct ice_pf *pf)
5876 {
5877 	int err, i;
5878 
5879 	if (!pf->vsi)
5880 		return;
5881 
5882 	ice_for_each_vsi(pf, i) {
5883 		if (!pf->vsi[i])
5884 			continue;
5885 
5886 		err = ice_vsi_release(pf->vsi[i]);
5887 		if (err)
5888 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
5889 				i, err, pf->vsi[i]->vsi_num);
5890 	}
5891 }
5892 
5893 /**
5894  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
5895  * @pf: pointer to the PF instance
5896  * @type: VSI type to rebuild
5897  *
5898  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
5899  */
5900 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
5901 {
5902 	struct device *dev = ice_pf_to_dev(pf);
5903 	enum ice_status status;
5904 	int i, err;
5905 
5906 	ice_for_each_vsi(pf, i) {
5907 		struct ice_vsi *vsi = pf->vsi[i];
5908 
5909 		if (!vsi || vsi->type != type)
5910 			continue;
5911 
5912 		/* rebuild the VSI */
5913 		err = ice_vsi_rebuild(vsi, true);
5914 		if (err) {
5915 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
5916 				err, vsi->idx, ice_vsi_type_str(type));
5917 			return err;
5918 		}
5919 
5920 		/* replay filters for the VSI */
5921 		status = ice_replay_vsi(&pf->hw, vsi->idx);
5922 		if (status) {
5923 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
5924 				ice_stat_str(status), vsi->idx,
5925 				ice_vsi_type_str(type));
5926 			return -EIO;
5927 		}
5928 
5929 		/* Re-map HW VSI number, using VSI handle that has been
5930 		 * previously validated in ice_replay_vsi() call above
5931 		 */
5932 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
5933 
5934 		/* enable the VSI */
5935 		err = ice_ena_vsi(vsi, false);
5936 		if (err) {
5937 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
5938 				err, vsi->idx, ice_vsi_type_str(type));
5939 			return err;
5940 		}
5941 
5942 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
5943 			 ice_vsi_type_str(type));
5944 	}
5945 
5946 	return 0;
5947 }
5948 
5949 /**
5950  * ice_update_pf_netdev_link - Update PF netdev link status
5951  * @pf: pointer to the PF instance
5952  */
5953 static void ice_update_pf_netdev_link(struct ice_pf *pf)
5954 {
5955 	bool link_up;
5956 	int i;
5957 
5958 	ice_for_each_vsi(pf, i) {
5959 		struct ice_vsi *vsi = pf->vsi[i];
5960 
5961 		if (!vsi || vsi->type != ICE_VSI_PF)
5962 			return;
5963 
5964 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
5965 		if (link_up) {
5966 			netif_carrier_on(pf->vsi[i]->netdev);
5967 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
5968 		} else {
5969 			netif_carrier_off(pf->vsi[i]->netdev);
5970 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
5971 		}
5972 	}
5973 }
5974 
5975 /**
5976  * ice_rebuild - rebuild after reset
5977  * @pf: PF to rebuild
5978  * @reset_type: type of reset
5979  *
5980  * Do not rebuild VF VSI in this flow because that is already handled via
5981  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
5982  * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
5983  * to reset/rebuild all the VF VSIs twice.
5984  */
5985 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
5986 {
5987 	struct device *dev = ice_pf_to_dev(pf);
5988 	struct ice_hw *hw = &pf->hw;
5989 	enum ice_status ret;
5990 	int err;
5991 
5992 	if (test_bit(__ICE_DOWN, pf->state))
5993 		goto clear_recovery;
5994 
5995 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
5996 
5997 	ret = ice_init_all_ctrlq(hw);
5998 	if (ret) {
5999 		dev_err(dev, "control queues init failed %s\n",
6000 			ice_stat_str(ret));
6001 		goto err_init_ctrlq;
6002 	}
6003 
6004 	/* if DDP was previously loaded successfully */
6005 	if (!ice_is_safe_mode(pf)) {
6006 		/* reload the SW DB of filter tables */
6007 		if (reset_type == ICE_RESET_PFR)
6008 			ice_fill_blk_tbls(hw);
6009 		else
6010 			/* Reload DDP Package after CORER/GLOBR reset */
6011 			ice_load_pkg(NULL, pf);
6012 	}
6013 
6014 	ret = ice_clear_pf_cfg(hw);
6015 	if (ret) {
6016 		dev_err(dev, "clear PF configuration failed %s\n",
6017 			ice_stat_str(ret));
6018 		goto err_init_ctrlq;
6019 	}
6020 
6021 	if (pf->first_sw->dflt_vsi_ena)
6022 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6023 	/* clear the default VSI configuration if it exists */
6024 	pf->first_sw->dflt_vsi = NULL;
6025 	pf->first_sw->dflt_vsi_ena = false;
6026 
6027 	ice_clear_pxe_mode(hw);
6028 
6029 	ret = ice_get_caps(hw);
6030 	if (ret) {
6031 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6032 		goto err_init_ctrlq;
6033 	}
6034 
6035 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6036 	if (ret) {
6037 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6038 		goto err_init_ctrlq;
6039 	}
6040 
6041 	err = ice_sched_init_port(hw->port_info);
6042 	if (err)
6043 		goto err_sched_init_port;
6044 
6045 	/* start misc vector */
6046 	err = ice_req_irq_msix_misc(pf);
6047 	if (err) {
6048 		dev_err(dev, "misc vector setup failed: %d\n", err);
6049 		goto err_sched_init_port;
6050 	}
6051 
6052 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6053 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6054 		if (!rd32(hw, PFQF_FD_SIZE)) {
6055 			u16 unused, guar, b_effort;
6056 
6057 			guar = hw->func_caps.fd_fltr_guar;
6058 			b_effort = hw->func_caps.fd_fltr_best_effort;
6059 
6060 			/* force guaranteed filter pool for PF */
6061 			ice_alloc_fd_guar_item(hw, &unused, guar);
6062 			/* force shared filter pool for PF */
6063 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6064 		}
6065 	}
6066 
6067 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6068 		ice_dcb_rebuild(pf);
6069 
6070 	/* rebuild PF VSI */
6071 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6072 	if (err) {
6073 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6074 		goto err_vsi_rebuild;
6075 	}
6076 
6077 	/* If Flow Director is active */
6078 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6079 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6080 		if (err) {
6081 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6082 			goto err_vsi_rebuild;
6083 		}
6084 
6085 		/* replay HW Flow Director recipes */
6086 		if (hw->fdir_prof)
6087 			ice_fdir_replay_flows(hw);
6088 
6089 		/* replay Flow Director filters */
6090 		ice_fdir_replay_fltrs(pf);
6091 
6092 		ice_rebuild_arfs(pf);
6093 	}
6094 
6095 	ice_update_pf_netdev_link(pf);
6096 
6097 	/* tell the firmware we are up */
6098 	ret = ice_send_version(pf);
6099 	if (ret) {
6100 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6101 			ice_stat_str(ret));
6102 		goto err_vsi_rebuild;
6103 	}
6104 
6105 	ice_replay_post(hw);
6106 
6107 	/* if we get here, reset flow is successful */
6108 	clear_bit(__ICE_RESET_FAILED, pf->state);
6109 	return;
6110 
6111 err_vsi_rebuild:
6112 err_sched_init_port:
6113 	ice_sched_cleanup_all(hw);
6114 err_init_ctrlq:
6115 	ice_shutdown_all_ctrlq(hw);
6116 	set_bit(__ICE_RESET_FAILED, pf->state);
6117 clear_recovery:
6118 	/* set this bit in PF state to control service task scheduling */
6119 	set_bit(__ICE_NEEDS_RESTART, pf->state);
6120 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6121 }
6122 
6123 /**
6124  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6125  * @vsi: Pointer to VSI structure
6126  */
6127 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6128 {
6129 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6130 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6131 	else
6132 		return ICE_RXBUF_3072;
6133 }
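
/* Worked example, assuming ICE_ETH_PKT_HDR_PAD covers the Ethernet header,
 * FCS and two VLAN tags (14 + 4 + 2 * 4 = 26 bytes): on a 4 KiB-page system
 * without legacy-rx, the helper above returns ICE_RXBUF_3072, so
 * ice_change_mtu() below rejects any MTU larger than 3072 - 26 = 3046 while
 * an XDP program is loaded.
 */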
6134 
6135 /**
6136  * ice_change_mtu - NDO callback to change the MTU
6137  * @netdev: network interface device structure
6138  * @new_mtu: new value for maximum frame size
6139  *
6140  * Returns 0 on success, negative on failure
6141  */
6142 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6143 {
6144 	struct ice_netdev_priv *np = netdev_priv(netdev);
6145 	struct ice_vsi *vsi = np->vsi;
6146 	struct ice_pf *pf = vsi->back;
6147 	u8 count = 0;
6148 
6149 	if (new_mtu == (int)netdev->mtu) {
6150 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6151 		return 0;
6152 	}
6153 
6154 	if (ice_is_xdp_ena_vsi(vsi)) {
6155 		int frame_size = ice_max_xdp_frame_size(vsi);
6156 
6157 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6158 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6159 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6160 			return -EINVAL;
6161 		}
6162 	}
6163 
6164 	/* if a reset is in progress, wait for some time for it to complete */
6165 	do {
6166 		if (ice_is_reset_in_progress(pf->state)) {
6167 			count++;
6168 			usleep_range(1000, 2000);
6169 		} else {
6170 			break;
6171 		}
6173 	} while (count < 100);
6174 
6175 	if (count == 100) {
6176 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6177 		return -EBUSY;
6178 	}
6179 
6180 	netdev->mtu = (unsigned int)new_mtu;
6181 
6182 	/* if VSI is up, bring it down and then back up */
6183 	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
6184 		int err;
6185 
6186 		err = ice_down(vsi);
6187 		if (err) {
6188 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6189 			return err;
6190 		}
6191 
6192 		err = ice_up(vsi);
6193 		if (err) {
6194 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6195 			return err;
6196 		}
6197 	}
6198 
6199 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6200 	return 0;
6201 }
6202 
6203 /**
6204  * ice_aq_str - convert AQ err code to a string
6205  * @aq_err: the AQ error code to convert
6206  */
6207 const char *ice_aq_str(enum ice_aq_err aq_err)
6208 {
6209 	switch (aq_err) {
6210 	case ICE_AQ_RC_OK:
6211 		return "OK";
6212 	case ICE_AQ_RC_EPERM:
6213 		return "ICE_AQ_RC_EPERM";
6214 	case ICE_AQ_RC_ENOENT:
6215 		return "ICE_AQ_RC_ENOENT";
6216 	case ICE_AQ_RC_ENOMEM:
6217 		return "ICE_AQ_RC_ENOMEM";
6218 	case ICE_AQ_RC_EBUSY:
6219 		return "ICE_AQ_RC_EBUSY";
6220 	case ICE_AQ_RC_EEXIST:
6221 		return "ICE_AQ_RC_EEXIST";
6222 	case ICE_AQ_RC_EINVAL:
6223 		return "ICE_AQ_RC_EINVAL";
6224 	case ICE_AQ_RC_ENOSPC:
6225 		return "ICE_AQ_RC_ENOSPC";
6226 	case ICE_AQ_RC_ENOSYS:
6227 		return "ICE_AQ_RC_ENOSYS";
6228 	case ICE_AQ_RC_EMODE:
6229 		return "ICE_AQ_RC_EMODE";
6230 	case ICE_AQ_RC_ENOSEC:
6231 		return "ICE_AQ_RC_ENOSEC";
6232 	case ICE_AQ_RC_EBADSIG:
6233 		return "ICE_AQ_RC_EBADSIG";
6234 	case ICE_AQ_RC_ESVN:
6235 		return "ICE_AQ_RC_ESVN";
6236 	case ICE_AQ_RC_EBADMAN:
6237 		return "ICE_AQ_RC_EBADMAN";
6238 	case ICE_AQ_RC_EBADBUF:
6239 		return "ICE_AQ_RC_EBADBUF";
6240 	}
6241 
6242 	return "ICE_AQ_RC_UNKNOWN";
6243 }
6244 
6245 /**
6246  * ice_stat_str - convert status err code to a string
6247  * @stat_err: the status error code to convert
6248  */
6249 const char *ice_stat_str(enum ice_status stat_err)
6250 {
6251 	switch (stat_err) {
6252 	case ICE_SUCCESS:
6253 		return "OK";
6254 	case ICE_ERR_PARAM:
6255 		return "ICE_ERR_PARAM";
6256 	case ICE_ERR_NOT_IMPL:
6257 		return "ICE_ERR_NOT_IMPL";
6258 	case ICE_ERR_NOT_READY:
6259 		return "ICE_ERR_NOT_READY";
6260 	case ICE_ERR_NOT_SUPPORTED:
6261 		return "ICE_ERR_NOT_SUPPORTED";
6262 	case ICE_ERR_BAD_PTR:
6263 		return "ICE_ERR_BAD_PTR";
6264 	case ICE_ERR_INVAL_SIZE:
6265 		return "ICE_ERR_INVAL_SIZE";
6266 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6267 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6268 	case ICE_ERR_RESET_FAILED:
6269 		return "ICE_ERR_RESET_FAILED";
6270 	case ICE_ERR_FW_API_VER:
6271 		return "ICE_ERR_FW_API_VER";
6272 	case ICE_ERR_NO_MEMORY:
6273 		return "ICE_ERR_NO_MEMORY";
6274 	case ICE_ERR_CFG:
6275 		return "ICE_ERR_CFG";
6276 	case ICE_ERR_OUT_OF_RANGE:
6277 		return "ICE_ERR_OUT_OF_RANGE";
6278 	case ICE_ERR_ALREADY_EXISTS:
6279 		return "ICE_ERR_ALREADY_EXISTS";
6280 	case ICE_ERR_NVM:
6281 		return "ICE_ERR_NVM";
6282 	case ICE_ERR_NVM_CHECKSUM:
6283 		return "ICE_ERR_NVM_CHECKSUM";
6284 	case ICE_ERR_BUF_TOO_SHORT:
6285 		return "ICE_ERR_BUF_TOO_SHORT";
6286 	case ICE_ERR_NVM_BLANK_MODE:
6287 		return "ICE_ERR_NVM_BLANK_MODE";
6288 	case ICE_ERR_IN_USE:
6289 		return "ICE_ERR_IN_USE";
6290 	case ICE_ERR_MAX_LIMIT:
6291 		return "ICE_ERR_MAX_LIMIT";
6292 	case ICE_ERR_RESET_ONGOING:
6293 		return "ICE_ERR_RESET_ONGOING";
6294 	case ICE_ERR_HW_TABLE:
6295 		return "ICE_ERR_HW_TABLE";
6296 	case ICE_ERR_DOES_NOT_EXIST:
6297 		return "ICE_ERR_DOES_NOT_EXIST";
6298 	case ICE_ERR_FW_DDP_MISMATCH:
6299 		return "ICE_ERR_FW_DDP_MISMATCH";
6300 	case ICE_ERR_AQ_ERROR:
6301 		return "ICE_ERR_AQ_ERROR";
6302 	case ICE_ERR_AQ_TIMEOUT:
6303 		return "ICE_ERR_AQ_TIMEOUT";
6304 	case ICE_ERR_AQ_FULL:
6305 		return "ICE_ERR_AQ_FULL";
6306 	case ICE_ERR_AQ_NO_WORK:
6307 		return "ICE_ERR_AQ_NO_WORK";
6308 	case ICE_ERR_AQ_EMPTY:
6309 		return "ICE_ERR_AQ_EMPTY";
6310 	case ICE_ERR_AQ_FW_CRITICAL:
6311 		return "ICE_ERR_AQ_FW_CRITICAL";
6312 	}
6313 
6314 	return "ICE_ERR_UNKNOWN";
6315 }
6316 
6317 /**
6318  * ice_set_rss - Set RSS keys and lut
6319  * @vsi: Pointer to VSI structure
6320  * @seed: RSS hash seed
6321  * @lut: Lookup table
6322  * @lut_size: Lookup table size
6323  *
6324  * Returns 0 on success, negative on failure
6325  */
6326 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
6327 {
6328 	struct ice_pf *pf = vsi->back;
6329 	struct ice_hw *hw = &pf->hw;
6330 	enum ice_status status;
6331 	struct device *dev;
6332 
6333 	dev = ice_pf_to_dev(pf);
6334 	if (seed) {
6335 		struct ice_aqc_get_set_rss_keys *buf =
6336 				  (struct ice_aqc_get_set_rss_keys *)seed;
6337 
6338 		status = ice_aq_set_rss_key(hw, vsi->idx, buf);
6339 
6340 		if (status) {
6341 			dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
6342 				ice_stat_str(status),
6343 				ice_aq_str(hw->adminq.sq_last_status));
6344 			return -EIO;
6345 		}
6346 	}
6347 
6348 	if (lut) {
6349 		status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
6350 					    lut, lut_size);
6351 		if (status) {
6352 			dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
6353 				ice_stat_str(status),
6354 				ice_aq_str(hw->adminq.sq_last_status));
6355 			return -EIO;
6356 		}
6357 	}
6358 
6359 	return 0;
6360 }
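
/* Caller sketch for ice_set_rss(), as exercised by the ethtool .set_rxfh
 * path (buffer sizes here are illustrative; the driver derives them from
 * the HW key size and vsi->rss_table_size):
 *
 *	err = ice_set_rss(vsi, rxfh_key, rxfh_indir_tbl, vsi->rss_table_size);
 *
 * Passing NULL for either @seed or @lut skips that half of the update, so a
 * key-only or LUT-only refresh is possible.
 */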
6361 
6362 /**
6363  * ice_get_rss - Get RSS keys and lut
6364  * @vsi: Pointer to VSI structure
6365  * @seed: Buffer to store the keys
6366  * @lut: Buffer to store the lookup table entries
6367  * @lut_size: Size of buffer to store the lookup table entries
6368  *
6369  * Returns 0 on success, negative on failure
6370  */
6371 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
6372 {
6373 	struct ice_pf *pf = vsi->back;
6374 	struct ice_hw *hw = &pf->hw;
6375 	enum ice_status status;
6376 	struct device *dev;
6377 
6378 	dev = ice_pf_to_dev(pf);
6379 	if (seed) {
6380 		struct ice_aqc_get_set_rss_keys *buf =
6381 				  (struct ice_aqc_get_set_rss_keys *)seed;
6382 
6383 		status = ice_aq_get_rss_key(hw, vsi->idx, buf);
6384 		if (status) {
6385 			dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
6386 				ice_stat_str(status),
6387 				ice_aq_str(hw->adminq.sq_last_status));
6388 			return -EIO;
6389 		}
6390 	}
6391 
6392 	if (lut) {
6393 		status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
6394 					    lut, lut_size);
6395 		if (status) {
6396 			dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
6397 				ice_stat_str(status),
6398 				ice_aq_str(hw->adminq.sq_last_status));
6399 			return -EIO;
6400 		}
6401 	}
6402 
6403 	return 0;
6404 }
6405 
6406 /**
6407  * ice_bridge_getlink - Get the hardware bridge mode
6408  * @skb: skb buff
6409  * @pid: process ID
6410  * @seq: RTNL message seq
6411  * @dev: the netdev being configured
6412  * @filter_mask: filter mask passed in
6413  * @nlflags: netlink flags passed in
6414  *
6415  * Return the bridge mode (VEB/VEPA)
6416  */
6417 static int
6418 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6419 		   struct net_device *dev, u32 filter_mask, int nlflags)
6420 {
6421 	struct ice_netdev_priv *np = netdev_priv(dev);
6422 	struct ice_vsi *vsi = np->vsi;
6423 	struct ice_pf *pf = vsi->back;
6424 	u16 bmode;
6425 
6426 	bmode = pf->first_sw->bridge_mode;
6427 
6428 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6429 				       filter_mask, NULL);
6430 }
6431 
6432 /**
6433  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6434  * @vsi: Pointer to VSI structure
6435  * @bmode: Hardware bridge mode (VEB/VEPA)
6436  *
6437  * Returns 0 on success, negative on failure
6438  */
6439 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6440 {
6441 	struct ice_aqc_vsi_props *vsi_props;
6442 	struct ice_hw *hw = &vsi->back->hw;
6443 	struct ice_vsi_ctx *ctxt;
6444 	enum ice_status status;
6445 	int ret = 0;
6446 
6447 	vsi_props = &vsi->info;
6448 
6449 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6450 	if (!ctxt)
6451 		return -ENOMEM;
6452 
6453 	ctxt->info = vsi->info;
6454 
6455 	if (bmode == BRIDGE_MODE_VEB)
6456 		/* change from VEPA to VEB mode */
6457 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6458 	else
6459 		/* change from VEB to VEPA mode */
6460 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6461 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6462 
6463 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6464 	if (status) {
6465 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6466 			bmode, ice_stat_str(status),
6467 			ice_aq_str(hw->adminq.sq_last_status));
6468 		ret = -EIO;
6469 		goto out;
6470 	}
6471 	/* Update sw flags for book keeping */
6472 	/* Update sw flags for bookkeeping */
6473 
6474 out:
6475 	kfree(ctxt);
6476 	return ret;
6477 }
6478 
6479 /**
6480  * ice_bridge_setlink - Set the hardware bridge mode
6481  * @dev: the netdev being configured
6482  * @nlh: RTNL message
6483  * @flags: bridge setlink flags
6484  * @extack: netlink extended ack
6485  *
6486  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6487  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6488  * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
6489  * not already set) for all VSIs connected to this switch, and also updates the
6490  * unicast switch filter rules for the corresponding switch of the netdev.
6491 static int
6492 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6493 		   u16 __always_unused flags,
6494 		   struct netlink_ext_ack __always_unused *extack)
6495 {
6496 	struct ice_netdev_priv *np = netdev_priv(dev);
6497 	struct ice_pf *pf = np->vsi->back;
6498 	struct nlattr *attr, *br_spec;
6499 	struct ice_hw *hw = &pf->hw;
6500 	enum ice_status status;
6501 	struct ice_sw *pf_sw;
6502 	int rem, v, err = 0;
6503 
6504 	pf_sw = pf->first_sw;
6505 	/* find the attribute in the netlink message */
6506 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
6507 
6508 	nla_for_each_nested(attr, br_spec, rem) {
6509 		__u16 mode;
6510 
6511 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6512 			continue;
6513 		mode = nla_get_u16(attr);
6514 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6515 			return -EINVAL;
6516 		/* Continue  if bridge mode is not being flipped */
6517 		/* Continue if bridge mode is not being flipped */
6518 			continue;
6519 		/* Iterates through the PF VSI list and update the loopback
6520 		 * mode of the VSI
6521 		 */
6522 		ice_for_each_vsi(pf, v) {
6523 			if (!pf->vsi[v])
6524 				continue;
6525 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6526 			if (err)
6527 				return err;
6528 		}
6529 
6530 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6531 		/* Update the unicast switch filter rules for the corresponding
6532 		 * switch of the netdev
6533 		 */
6534 		status = ice_update_sw_rule_bridge_mode(hw);
6535 		if (status) {
6536 			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6537 				   mode, ice_stat_str(status),
6538 				   ice_aq_str(hw->adminq.sq_last_status));
6539 			/* revert hw->evb_veb */
6540 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6541 			return -EIO;
6542 		}
6543 
6544 		pf_sw->bridge_mode = mode;
6545 	}
6546 
6547 	return 0;
6548 }
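
/* From user space this path is typically exercised via iproute2, e.g.:
 *
 *	bridge link set dev <pf-netdev> hwmode vepa
 *	bridge link set dev <pf-netdev> hwmode veb
 *
 * which sends an RTM_SETLINK message carrying IFLA_BRIDGE_MODE inside
 * IFLA_AF_SPEC, exactly the attribute the nla_for_each_nested() loop
 * above parses.
 */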
6549 
6550 /**
6551  * ice_tx_timeout - Respond to a Tx Hang
6552  * @netdev: network interface device structure
6553  * @txqueue: Tx queue
6554  */
6555 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6556 {
6557 	struct ice_netdev_priv *np = netdev_priv(netdev);
6558 	struct ice_ring *tx_ring = NULL;
6559 	struct ice_vsi *vsi = np->vsi;
6560 	struct ice_pf *pf = vsi->back;
6561 	u32 i;
6562 
6563 	pf->tx_timeout_count++;
6564 
6565 	/* Check if PFC is enabled for the TC to which the queue belongs
6566 	/* Check if PFC is enabled for the TC to which the queue belongs.
6567 	 * If so, the Tx timeout is not caused by a hung queue and there is
6568 	 * no need to reset and rebuild
6569 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6570 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6571 			 txqueue);
6572 		return;
6573 	}
6574 
6575 	/* now that we have an index, find the tx_ring struct */
6576 	for (i = 0; i < vsi->num_txq; i++)
6577 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
6578 			if (txqueue == vsi->tx_rings[i]->q_index) {
6579 				tx_ring = vsi->tx_rings[i];
6580 				break;
6581 			}
6582 
6583 	/* Reset recovery level if enough time has elapsed after last timeout.
6584 	 * Also ensure no new reset action happens before next timeout period.
6585 	 */
6586 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
6587 		pf->tx_timeout_recovery_level = 1;
6588 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
6589 				       netdev->watchdog_timeo)))
6590 		return;
6591 
6592 	if (tx_ring) {
6593 		struct ice_hw *hw = &pf->hw;
6594 		u32 head, val = 0;
6595 
6596 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
6597 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
6598 		/* Read interrupt register */
6599 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
6600 
6601 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
6602 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
6603 			    head, tx_ring->next_to_use, val);
6604 	}
6605 
6606 	pf->tx_timeout_last_recovery = jiffies;
6607 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
6608 		    pf->tx_timeout_recovery_level, txqueue);
6609 
6610 	switch (pf->tx_timeout_recovery_level) {
6611 	case 1:
6612 		set_bit(__ICE_PFR_REQ, pf->state);
6613 		break;
6614 	case 2:
6615 		set_bit(__ICE_CORER_REQ, pf->state);
6616 		break;
6617 	case 3:
6618 		set_bit(__ICE_GLOBR_REQ, pf->state);
6619 		break;
6620 	default:
6621 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
6622 		set_bit(__ICE_DOWN, pf->state);
6623 		set_bit(__ICE_NEEDS_RESTART, vsi->state);
6624 		set_bit(__ICE_SERVICE_DIS, pf->state);
6625 		break;
6626 	}
6627 
6628 	ice_service_task_schedule(pf);
6629 	pf->tx_timeout_recovery_level++;
6630 }
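
/* Escalation example: the first hang on a queue requests a PFR (level 1).
 * If the queue hangs again more than watchdog_timeo but less than 20
 * seconds after that recovery, the level has not been reset, so the next
 * request escalates to CORER (level 2), then GLOBR (level 3), before the
 * driver declares the device unrecoverable and disables itself.
 */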
6631 
6632 /**
6633  * ice_open - Called when a network interface becomes active
6634  * @netdev: network interface device structure
6635  *
6636  * The open entry point is called when a network interface is made
6637  * active by the system (IFF_UP). At this point all resources needed
6638  * for transmit and receive operations are allocated, the interrupt
6639  * handler is registered with the OS, the netdev watchdog is enabled,
6640  * and the stack is notified that the interface is ready.
6641  *
6642  * Returns 0 on success, negative value on failure
6643  */
6644 int ice_open(struct net_device *netdev)
6645 {
6646 	struct ice_netdev_priv *np = netdev_priv(netdev);
6647 	struct ice_vsi *vsi = np->vsi;
6648 	struct ice_pf *pf = vsi->back;
6649 	struct ice_port_info *pi;
6650 	int err;
6651 
6652 	if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
6653 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
6654 		return -EIO;
6655 	}
6656 
6657 	if (test_bit(__ICE_DOWN, pf->state)) {
6658 		netdev_err(netdev, "device is not ready yet\n");
6659 		return -EBUSY;
6660 	}
6661 
6662 	netif_carrier_off(netdev);
6663 
6664 	pi = vsi->port_info;
6665 	err = ice_update_link_info(pi);
6666 	if (err) {
6667 		netdev_err(netdev, "Failed to get link info, error %d\n",
6668 			   err);
6669 		return err;
6670 	}
6671 
6672 	/* Set PHY if there is media, otherwise, turn off PHY */
6673 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
6674 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6675 		if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
6676 			err = ice_init_phy_user_cfg(pi);
6677 			if (err) {
6678 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
6679 					   err);
6680 				return err;
6681 			}
6682 		}
6683 
6684 		err = ice_configure_phy(vsi);
6685 		if (err) {
6686 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
6687 				   err);
6688 			return err;
6689 		}
6690 	} else {
6691 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6692 		err = ice_aq_set_link_restart_an(pi, false, NULL);
6693 		if (err) {
6694 			netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
6695 				   vsi->vsi_num, err);
6696 			return err;
6697 		}
6698 	}
6699 
6700 	err = ice_vsi_open(vsi);
6701 	if (err)
6702 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
6703 			   vsi->vsi_num, vsi->vsw->sw_id);
6704 
6705 	/* Update existing tunnels information */
6706 	udp_tunnel_get_rx_info(netdev);
6707 
6708 	return err;
6709 }
6710 
6711 /**
6712  * ice_stop - Disables a network interface
6713  * @netdev: network interface device structure
6714  *
6715  * The stop entry point is called when an interface is de-activated by the OS,
6716  * and the netdevice enters the DOWN state. The hardware is still under the
6717  * driver's control, but the netdev interface is disabled.
6718  *
6719  * Returns success only - not allowed to fail
6720  */
6721 int ice_stop(struct net_device *netdev)
6722 {
6723 	struct ice_netdev_priv *np = netdev_priv(netdev);
6724 	struct ice_vsi *vsi = np->vsi;
6725 
6726 	ice_vsi_close(vsi);
6727 
6728 	return 0;
6729 }
6730 
6731 /**
6732  * ice_features_check - Validate encapsulated packet conforms to limits
6733  * @skb: skb buffer
6734  * @netdev: This port's netdev
6735  * @features: Offload features that the stack believes apply
6736  */
6737 static netdev_features_t
6738 ice_features_check(struct sk_buff *skb,
6739 		   struct net_device __always_unused *netdev,
6740 		   netdev_features_t features)
6741 {
6742 	size_t len;
6743 
6744 	/* No point in doing any of this if neither checksum nor GSO are
6745 	 * being requested for this frame. We can rule out both by just
6746 	 * checking for CHECKSUM_PARTIAL
6747 	 */
6748 	if (skb->ip_summed != CHECKSUM_PARTIAL)
6749 		return features;
6750 
6751 	/* We cannot support GSO if the MSS is going to be less than
6752 	 * 64 bytes. If it is then we need to drop support for GSO.
6753 	 */
6754 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
6755 		features &= ~NETIF_F_GSO_MASK;
6756 
6757 	len = skb_network_header(skb) - skb->data;
6758 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
6759 		goto out_rm_features;
6760 
6761 	len = skb_transport_header(skb) - skb_network_header(skb);
6762 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6763 		goto out_rm_features;
6764 
6765 	if (skb->encapsulation) {
6766 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
6767 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
6768 			goto out_rm_features;
6769 
6770 		len = skb_inner_transport_header(skb) -
6771 		      skb_inner_network_header(skb);
6772 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6773 			goto out_rm_features;
6774 	}
6775 
6776 	return features;
6777 out_rm_features:
6778 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
6779 }
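
/* Worked example: a plain (non-encapsulated) IPv4/TCP frame has a 14-byte
 * MAC header and a 20-byte IP header, so both offsets above are even and
 * within ICE_TXD_MACLEN_MAX/ICE_TXD_IPLEN_MAX, and the requested checksum
 * and GSO features are kept. A frame with an odd or oversized header length
 * falls through to out_rm_features and is limited to software checksumming
 * and segmentation.
 */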
6780 
6781 static const struct net_device_ops ice_netdev_safe_mode_ops = {
6782 	.ndo_open = ice_open,
6783 	.ndo_stop = ice_stop,
6784 	.ndo_start_xmit = ice_start_xmit,
6785 	.ndo_set_mac_address = ice_set_mac_address,
6786 	.ndo_validate_addr = eth_validate_addr,
6787 	.ndo_change_mtu = ice_change_mtu,
6788 	.ndo_get_stats64 = ice_get_stats64,
6789 	.ndo_tx_timeout = ice_tx_timeout,
6790 };
6791 
6792 static const struct net_device_ops ice_netdev_ops = {
6793 	.ndo_open = ice_open,
6794 	.ndo_stop = ice_stop,
6795 	.ndo_start_xmit = ice_start_xmit,
6796 	.ndo_features_check = ice_features_check,
6797 	.ndo_set_rx_mode = ice_set_rx_mode,
6798 	.ndo_set_mac_address = ice_set_mac_address,
6799 	.ndo_validate_addr = eth_validate_addr,
6800 	.ndo_change_mtu = ice_change_mtu,
6801 	.ndo_get_stats64 = ice_get_stats64,
6802 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
6803 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
6804 	.ndo_set_vf_mac = ice_set_vf_mac,
6805 	.ndo_get_vf_config = ice_get_vf_cfg,
6806 	.ndo_set_vf_trust = ice_set_vf_trust,
6807 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
6808 	.ndo_set_vf_link_state = ice_set_vf_link_state,
6809 	.ndo_get_vf_stats = ice_get_vf_stats,
6810 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
6811 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
6812 	.ndo_set_features = ice_set_features,
6813 	.ndo_bridge_getlink = ice_bridge_getlink,
6814 	.ndo_bridge_setlink = ice_bridge_setlink,
6815 	.ndo_fdb_add = ice_fdb_add,
6816 	.ndo_fdb_del = ice_fdb_del,
6817 #ifdef CONFIG_RFS_ACCEL
6818 	.ndo_rx_flow_steer = ice_rx_flow_steer,
6819 #endif
6820 	.ndo_tx_timeout = ice_tx_timeout,
6821 	.ndo_bpf = ice_xdp,
6822 	.ndo_xdp_xmit = ice_xdp_xmit,
6823 	.ndo_xsk_wakeup = ice_xsk_wakeup,
6824 };
6825