1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 
17 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
18 static const char ice_driver_string[] = DRV_SUMMARY;
19 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
20 
21 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
22 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
23 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
24 
25 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
26 MODULE_DESCRIPTION(DRV_SUMMARY);
27 MODULE_LICENSE("GPL v2");
28 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
29 
30 static int debug = -1;
31 module_param(debug, int, 0644);
32 #ifndef CONFIG_DYNAMIC_DEBUG
33 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
34 #else
35 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
36 #endif /* !CONFIG_DYNAMIC_DEBUG */
37 
38 static struct workqueue_struct *ice_wq;
39 static const struct net_device_ops ice_netdev_safe_mode_ops;
40 static const struct net_device_ops ice_netdev_ops;
41 static int ice_vsi_open(struct ice_vsi *vsi);
42 
43 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
44 
45 static void ice_vsi_release_all(struct ice_pf *pf);
46 
47 bool netif_is_ice(struct net_device *dev)
48 {
49 	return dev && (dev->netdev_ops == &ice_netdev_ops);
50 }
51 
52 /**
53  * ice_get_tx_pending - returns number of Tx descriptors not processed
54  * @ring: the ring of descriptors
55  */
56 static u16 ice_get_tx_pending(struct ice_ring *ring)
57 {
58 	u16 head, tail;
59 
60 	head = ring->next_to_clean;
61 	tail = ring->next_to_use;
62 
63 	if (head != tail)
64 		return (head < tail) ?
65 			tail - head : (tail + ring->count - head);
66 	return 0;
67 }
68 
69 /**
70  * ice_check_for_hang_subtask - check for and recover hung queues
71  * @pf: pointer to PF struct
72  */
73 static void ice_check_for_hang_subtask(struct ice_pf *pf)
74 {
75 	struct ice_vsi *vsi = NULL;
76 	struct ice_hw *hw;
77 	unsigned int i;
78 	int packets;
79 	u32 v;
80 
81 	ice_for_each_vsi(pf, v)
82 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
83 			vsi = pf->vsi[v];
84 			break;
85 		}
86 
87 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
88 		return;
89 
90 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
91 		return;
92 
93 	hw = &vsi->back->hw;
94 
95 	for (i = 0; i < vsi->num_txq; i++) {
96 		struct ice_ring *tx_ring = vsi->tx_rings[i];
97 
98 		if (tx_ring && tx_ring->desc) {
99 			/* If packet counter has not changed the queue is
100 			 * likely stalled, so force an interrupt for this
101 			 * queue.
102 			 *
103 			 * prev_pkt would be negative if there was no
104 			 * pending work.
105 			 */
106 			packets = tx_ring->stats.pkts & INT_MAX;
107 			if (tx_ring->tx_stats.prev_pkt == packets) {
108 				/* Trigger sw interrupt to revive the queue */
109 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
110 				continue;
111 			}
112 
113 			/* Memory barrier between read of packet count and call
114 			 * to ice_get_tx_pending()
115 			 */
116 			smp_rmb();
117 			tx_ring->tx_stats.prev_pkt =
118 			    ice_get_tx_pending(tx_ring) ? packets : -1;
119 		}
120 	}
121 }
122 
123 /**
124  * ice_init_mac_fltr - Set initial MAC filters
125  * @pf: board private structure
126  *
127  * Set initial set of MAC filters for PF VSI; configure filters for permanent
128  * address and broadcast address. If an error is encountered, netdevice will be
129  * unregistered.
130  */
131 static int ice_init_mac_fltr(struct ice_pf *pf)
132 {
133 	enum ice_status status;
134 	struct ice_vsi *vsi;
135 	u8 *perm_addr;
136 
137 	vsi = ice_get_main_vsi(pf);
138 	if (!vsi)
139 		return -EINVAL;
140 
141 	perm_addr = vsi->port_info->mac.perm_addr;
142 	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
143 	if (status)
144 		return -EIO;
145 
146 	return 0;
147 }
148 
149 /**
150  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
151  * @netdev: the net device on which the sync is happening
152  * @addr: MAC address to sync
153  *
154  * This is a callback function which is called by the in kernel device sync
155  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
156  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
157  * MAC filters from the hardware.
158  */
159 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
160 {
161 	struct ice_netdev_priv *np = netdev_priv(netdev);
162 	struct ice_vsi *vsi = np->vsi;
163 
164 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
165 				     ICE_FWD_TO_VSI))
166 		return -EINVAL;
167 
168 	return 0;
169 }
170 
171 /**
172  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
173  * @netdev: the net device on which the unsync is happening
174  * @addr: MAC address to unsync
175  *
176  * This is a callback function which is called by the in kernel device unsync
177  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
178  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
179  * delete the MAC filters from the hardware.
180  */
181 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
182 {
183 	struct ice_netdev_priv *np = netdev_priv(netdev);
184 	struct ice_vsi *vsi = np->vsi;
185 
186 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
187 				     ICE_FWD_TO_VSI))
188 		return -EINVAL;
189 
190 	return 0;
191 }
192 
193 /**
194  * ice_vsi_fltr_changed - check if filter state changed
195  * @vsi: VSI to be checked
196  *
197  * returns true if filter state has changed, false otherwise.
198  */
199 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
200 {
201 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
202 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
203 	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
204 }
205 
206 /**
207  * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
208  * @vsi: the VSI being configured
209  * @promisc_m: mask of promiscuous config bits
210  * @set_promisc: enable or disable promisc flag request
211  *
212  */
213 static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
214 {
215 	struct ice_hw *hw = &vsi->back->hw;
216 	enum ice_status status = 0;
217 
218 	if (vsi->type != ICE_VSI_PF)
219 		return 0;
220 
221 	if (vsi->num_vlan > 1) {
222 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
223 						  set_promisc);
224 	} else {
225 		if (set_promisc)
226 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
227 						     0);
228 		else
229 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
230 						       0);
231 	}
232 
233 	if (status)
234 		return -EIO;
235 
236 	return 0;
237 }
238 
239 /**
240  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
241  * @vsi: ptr to the VSI
242  *
243  * Push any outstanding VSI filter changes through the AdminQ.
244  */
245 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
246 {
247 	struct device *dev = ice_pf_to_dev(vsi->back);
248 	struct net_device *netdev = vsi->netdev;
249 	bool promisc_forced_on = false;
250 	struct ice_pf *pf = vsi->back;
251 	struct ice_hw *hw = &pf->hw;
252 	enum ice_status status = 0;
253 	u32 changed_flags = 0;
254 	u8 promisc_m;
255 	int err = 0;
256 
257 	if (!vsi->netdev)
258 		return -EINVAL;
259 
260 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
261 		usleep_range(1000, 2000);
262 
263 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
264 	vsi->current_netdev_flags = vsi->netdev->flags;
265 
266 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
267 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
268 
269 	if (ice_vsi_fltr_changed(vsi)) {
270 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
271 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
272 		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
273 
274 		/* grab the netdev's addr_list_lock */
275 		netif_addr_lock_bh(netdev);
276 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
277 			      ice_add_mac_to_unsync_list);
278 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
279 			      ice_add_mac_to_unsync_list);
280 		/* our temp lists are populated. release lock */
281 		netif_addr_unlock_bh(netdev);
282 	}
283 
284 	/* Remove MAC addresses in the unsync list */
285 	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
286 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
287 	if (status) {
288 		netdev_err(netdev, "Failed to delete MAC filters\n");
289 		/* if we failed because of alloc failures, just bail */
290 		if (status == ICE_ERR_NO_MEMORY) {
291 			err = -ENOMEM;
292 			goto out;
293 		}
294 	}
295 
296 	/* Add MAC addresses in the sync list */
297 	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
298 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
299 	/* If filter is added successfully or already exists, do not go into
300 	 * 'if' condition and report it as error. Instead continue processing
301 	 * rest of the function.
302 	 */
303 	if (status && status != ICE_ERR_ALREADY_EXISTS) {
304 		netdev_err(netdev, "Failed to add MAC filters\n");
305 		/* If there is no more space for new umac filters, VSI
306 		 * should go into promiscuous mode. There should be some
307 		 * space reserved for promiscuous filters.
308 		 */
309 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
310 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
311 				      vsi->state)) {
312 			promisc_forced_on = true;
313 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
314 				    vsi->vsi_num);
315 		} else {
316 			err = -EIO;
317 			goto out;
318 		}
319 	}
320 	/* check for changes in promiscuous modes */
321 	if (changed_flags & IFF_ALLMULTI) {
322 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
323 			if (vsi->num_vlan > 1)
324 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
325 			else
326 				promisc_m = ICE_MCAST_PROMISC_BITS;
327 
328 			err = ice_cfg_promisc(vsi, promisc_m, true);
329 			if (err) {
330 				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
331 					   vsi->vsi_num);
332 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
333 				goto out_promisc;
334 			}
335 		} else {
336 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
337 			if (vsi->num_vlan > 1)
338 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
339 			else
340 				promisc_m = ICE_MCAST_PROMISC_BITS;
341 
342 			err = ice_cfg_promisc(vsi, promisc_m, false);
343 			if (err) {
344 				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
345 					   vsi->vsi_num);
346 				vsi->current_netdev_flags |= IFF_ALLMULTI;
347 				goto out_promisc;
348 			}
349 		}
350 	}
351 
352 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
353 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
354 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
355 		if (vsi->current_netdev_flags & IFF_PROMISC) {
356 			/* Apply Rx filter rule to get traffic from wire */
357 			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
358 				err = ice_set_dflt_vsi(pf->first_sw, vsi);
359 				if (err && err != -EEXIST) {
360 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
361 						   err, vsi->vsi_num);
362 					vsi->current_netdev_flags &=
363 						~IFF_PROMISC;
364 					goto out_promisc;
365 				}
366 				ice_cfg_vlan_pruning(vsi, false, false);
367 			}
368 		} else {
369 			/* Clear Rx filter to remove traffic from wire */
370 			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
371 				err = ice_clear_dflt_vsi(pf->first_sw);
372 				if (err) {
373 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
374 						   err, vsi->vsi_num);
375 					vsi->current_netdev_flags |=
376 						IFF_PROMISC;
377 					goto out_promisc;
378 				}
379 				if (vsi->num_vlan > 1)
380 					ice_cfg_vlan_pruning(vsi, true, false);
381 			}
382 		}
383 	}
384 	goto exit;
385 
386 out_promisc:
387 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
388 	goto exit;
389 out:
390 	/* if something went wrong then set the changed flag so we try again */
391 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
392 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
393 exit:
394 	clear_bit(ICE_CFG_BUSY, vsi->state);
395 	return err;
396 }
397 
398 /**
399  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
400  * @pf: board private structure
401  */
402 static void ice_sync_fltr_subtask(struct ice_pf *pf)
403 {
404 	int v;
405 
406 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
407 		return;
408 
409 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
410 
411 	ice_for_each_vsi(pf, v)
412 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
413 		    ice_vsi_sync_fltr(pf->vsi[v])) {
414 			/* come back and try again later */
415 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
416 			break;
417 		}
418 }
419 
420 /**
421  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
422  * @pf: the PF
423  * @locked: is the rtnl_lock already held
424  */
425 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
426 {
427 	int node;
428 	int v;
429 
430 	ice_for_each_vsi(pf, v)
431 		if (pf->vsi[v])
432 			ice_dis_vsi(pf->vsi[v], locked);
433 
434 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
435 		pf->pf_agg_node[node].num_vsis = 0;
436 
437 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
438 		pf->vf_agg_node[node].num_vsis = 0;
439 }
440 
441 /**
442  * ice_prepare_for_reset - prep for the core to reset
443  * @pf: board private structure
444  *
445  * Inform or close all dependent features in prep for reset.
446  */
447 static void
448 ice_prepare_for_reset(struct ice_pf *pf)
449 {
450 	struct ice_hw *hw = &pf->hw;
451 	unsigned int i;
452 
453 	/* already prepared for reset */
454 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
455 		return;
456 
457 	/* Notify VFs of impending reset */
458 	if (ice_check_sq_alive(hw, &hw->mailboxq))
459 		ice_vc_notify_reset(pf);
460 
461 	/* Disable VFs until reset is completed */
462 	ice_for_each_vf(pf, i)
463 		ice_set_vf_state_qs_dis(&pf->vf[i]);
464 
465 	/* clear SW filtering DB */
466 	ice_clear_hw_tbls(hw);
467 	/* disable the VSIs and their queues that are not already DOWN */
468 	ice_pf_dis_all_vsi(pf, false);
469 
470 	if (hw->port_info)
471 		ice_sched_clear_port(hw->port_info);
472 
473 	ice_shutdown_all_ctrlq(hw);
474 
475 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
476 }
477 
478 /**
479  * ice_do_reset - Initiate one of many types of resets
480  * @pf: board private structure
481  * @reset_type: reset type requested
482  * before this function was called.
483  */
484 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
485 {
486 	struct device *dev = ice_pf_to_dev(pf);
487 	struct ice_hw *hw = &pf->hw;
488 
489 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
490 
491 	ice_prepare_for_reset(pf);
492 
493 	/* trigger the reset */
494 	if (ice_reset(hw, reset_type)) {
495 		dev_err(dev, "reset %d failed\n", reset_type);
496 		set_bit(ICE_RESET_FAILED, pf->state);
497 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
498 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
499 		clear_bit(ICE_PFR_REQ, pf->state);
500 		clear_bit(ICE_CORER_REQ, pf->state);
501 		clear_bit(ICE_GLOBR_REQ, pf->state);
502 		return;
503 	}
504 
505 	/* PFR is a bit of a special case because it doesn't result in an OICR
506 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
507 	 * associated state bits.
508 	 */
509 	if (reset_type == ICE_RESET_PFR) {
510 		pf->pfr_count++;
511 		ice_rebuild(pf, reset_type);
512 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
513 		clear_bit(ICE_PFR_REQ, pf->state);
514 		ice_reset_all_vfs(pf, true);
515 	}
516 }
517 
518 /**
519  * ice_reset_subtask - Set up for resetting the device and driver
520  * @pf: board private structure
521  */
522 static void ice_reset_subtask(struct ice_pf *pf)
523 {
524 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
525 
526 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
527 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
528 	 * of reset is pending and sets bits in pf->state indicating the reset
529 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
530 	 * prepare for pending reset if not already (for PF software-initiated
531 	 * global resets the software should already be prepared for it as
532 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
533 	 * by firmware or software on other PFs, that bit is not set so prepare
534 	 * for the reset now), poll for reset done, rebuild and return.
535 	 */
536 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
537 		/* Perform the largest reset requested */
538 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
539 			reset_type = ICE_RESET_CORER;
540 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
541 			reset_type = ICE_RESET_GLOBR;
542 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
543 			reset_type = ICE_RESET_EMPR;
544 		/* return if no valid reset type requested */
545 		if (reset_type == ICE_RESET_INVAL)
546 			return;
547 		ice_prepare_for_reset(pf);
548 
549 		/* make sure we are ready to rebuild */
550 		if (ice_check_reset(&pf->hw)) {
551 			set_bit(ICE_RESET_FAILED, pf->state);
552 		} else {
553 			/* done with reset. start rebuild */
554 			pf->hw.reset_ongoing = false;
555 			ice_rebuild(pf, reset_type);
556 			/* clear bit to resume normal operations, but
557 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
558 			 */
559 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
560 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
561 			clear_bit(ICE_PFR_REQ, pf->state);
562 			clear_bit(ICE_CORER_REQ, pf->state);
563 			clear_bit(ICE_GLOBR_REQ, pf->state);
564 			ice_reset_all_vfs(pf, true);
565 		}
566 
567 		return;
568 	}
569 
570 	/* No pending resets to finish processing. Check for new resets */
571 	if (test_bit(ICE_PFR_REQ, pf->state))
572 		reset_type = ICE_RESET_PFR;
573 	if (test_bit(ICE_CORER_REQ, pf->state))
574 		reset_type = ICE_RESET_CORER;
575 	if (test_bit(ICE_GLOBR_REQ, pf->state))
576 		reset_type = ICE_RESET_GLOBR;
577 	/* If no valid reset type requested just return */
578 	if (reset_type == ICE_RESET_INVAL)
579 		return;
580 
581 	/* reset if not already down or busy */
582 	if (!test_bit(ICE_DOWN, pf->state) &&
583 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
584 		ice_do_reset(pf, reset_type);
585 	}
586 }
587 
588 /**
589  * ice_print_topo_conflict - print topology conflict message
590  * @vsi: the VSI whose topology status is being checked
591  */
592 static void ice_print_topo_conflict(struct ice_vsi *vsi)
593 {
594 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
595 	case ICE_AQ_LINK_TOPO_CONFLICT:
596 	case ICE_AQ_LINK_MEDIA_CONFLICT:
597 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
598 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
599 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
600 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
601 		break;
602 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
603 		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
604 		break;
605 	default:
606 		break;
607 	}
608 }
609 
610 /**
611  * ice_print_link_msg - print link up or down message
612  * @vsi: the VSI whose link status is being queried
613  * @isup: boolean for if the link is now up or down
614  */
615 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
616 {
617 	struct ice_aqc_get_phy_caps_data *caps;
618 	const char *an_advertised;
619 	enum ice_status status;
620 	const char *fec_req;
621 	const char *speed;
622 	const char *fec;
623 	const char *fc;
624 	const char *an;
625 
626 	if (!vsi)
627 		return;
628 
629 	if (vsi->current_isup == isup)
630 		return;
631 
632 	vsi->current_isup = isup;
633 
634 	if (!isup) {
635 		netdev_info(vsi->netdev, "NIC Link is Down\n");
636 		return;
637 	}
638 
639 	switch (vsi->port_info->phy.link_info.link_speed) {
640 	case ICE_AQ_LINK_SPEED_100GB:
641 		speed = "100 G";
642 		break;
643 	case ICE_AQ_LINK_SPEED_50GB:
644 		speed = "50 G";
645 		break;
646 	case ICE_AQ_LINK_SPEED_40GB:
647 		speed = "40 G";
648 		break;
649 	case ICE_AQ_LINK_SPEED_25GB:
650 		speed = "25 G";
651 		break;
652 	case ICE_AQ_LINK_SPEED_20GB:
653 		speed = "20 G";
654 		break;
655 	case ICE_AQ_LINK_SPEED_10GB:
656 		speed = "10 G";
657 		break;
658 	case ICE_AQ_LINK_SPEED_5GB:
659 		speed = "5 G";
660 		break;
661 	case ICE_AQ_LINK_SPEED_2500MB:
662 		speed = "2.5 G";
663 		break;
664 	case ICE_AQ_LINK_SPEED_1000MB:
665 		speed = "1 G";
666 		break;
667 	case ICE_AQ_LINK_SPEED_100MB:
668 		speed = "100 M";
669 		break;
670 	default:
671 		speed = "Unknown ";
672 		break;
673 	}
674 
675 	switch (vsi->port_info->fc.current_mode) {
676 	case ICE_FC_FULL:
677 		fc = "Rx/Tx";
678 		break;
679 	case ICE_FC_TX_PAUSE:
680 		fc = "Tx";
681 		break;
682 	case ICE_FC_RX_PAUSE:
683 		fc = "Rx";
684 		break;
685 	case ICE_FC_NONE:
686 		fc = "None";
687 		break;
688 	default:
689 		fc = "Unknown";
690 		break;
691 	}
692 
693 	/* Get FEC mode based on negotiated link info */
694 	switch (vsi->port_info->phy.link_info.fec_info) {
695 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
696 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
697 		fec = "RS-FEC";
698 		break;
699 	case ICE_AQ_LINK_25G_KR_FEC_EN:
700 		fec = "FC-FEC/BASE-R";
701 		break;
702 	default:
703 		fec = "NONE";
704 		break;
705 	}
706 
707 	/* check if autoneg completed, might be false due to not supported */
708 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
709 		an = "True";
710 	else
711 		an = "False";
712 
713 	/* Get FEC mode requested based on PHY caps last SW configuration */
714 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
715 	if (!caps) {
716 		fec_req = "Unknown";
717 		an_advertised = "Unknown";
718 		goto done;
719 	}
720 
721 	status = ice_aq_get_phy_caps(vsi->port_info, false,
722 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
723 	if (status)
724 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
725 
726 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
727 
728 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
729 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
730 		fec_req = "RS-FEC";
731 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
732 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
733 		fec_req = "FC-FEC/BASE-R";
734 	else
735 		fec_req = "NONE";
736 
737 	kfree(caps);
738 
739 done:
740 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
741 		    speed, fec_req, fec, an_advertised, an, fc);
742 	ice_print_topo_conflict(vsi);
743 }
744 
745 /**
746  * ice_vsi_link_event - update the VSI's netdev
747  * @vsi: the VSI on which the link event occurred
748  * @link_up: whether or not the VSI needs to be set up or down
749  */
750 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
751 {
752 	if (!vsi)
753 		return;
754 
755 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
756 		return;
757 
758 	if (vsi->type == ICE_VSI_PF) {
759 		if (link_up == netif_carrier_ok(vsi->netdev))
760 			return;
761 
762 		if (link_up) {
763 			netif_carrier_on(vsi->netdev);
764 			netif_tx_wake_all_queues(vsi->netdev);
765 		} else {
766 			netif_carrier_off(vsi->netdev);
767 			netif_tx_stop_all_queues(vsi->netdev);
768 		}
769 	}
770 }
771 
772 /**
773  * ice_set_dflt_mib - send a default config MIB to the FW
774  * @pf: private PF struct
775  *
776  * This function sends a default configuration MIB to the FW.
777  *
778  * If this function errors out at any point, the driver is still able to
779  * function.  The main impact is that LFC may not operate as expected.
780  * Therefore an error state in this function should be treated with a DBG
781  * message and continue on with driver rebuild/reenable.
782  */
783 static void ice_set_dflt_mib(struct ice_pf *pf)
784 {
785 	struct device *dev = ice_pf_to_dev(pf);
786 	u8 mib_type, *buf, *lldpmib = NULL;
787 	u16 len, typelen, offset = 0;
788 	struct ice_lldp_org_tlv *tlv;
789 	struct ice_hw *hw = &pf->hw;
790 	u32 ouisubtype;
791 
792 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
793 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
794 	if (!lldpmib) {
795 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
796 			__func__);
797 		return;
798 	}
799 
800 	/* Add ETS CFG TLV */
801 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
802 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
803 		   ICE_IEEE_ETS_TLV_LEN);
804 	tlv->typelen = htons(typelen);
805 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
806 		      ICE_IEEE_SUBTYPE_ETS_CFG);
807 	tlv->ouisubtype = htonl(ouisubtype);
808 
809 	buf = tlv->tlvinfo;
810 	buf[0] = 0;
811 
812 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
813 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
814 	 * Octets 13 - 20 are TSA values - leave as zeros
815 	 */
816 	buf[5] = 0x64;
817 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
818 	offset += len + 2;
819 	tlv = (struct ice_lldp_org_tlv *)
820 		((char *)tlv + sizeof(tlv->typelen) + len);
821 
822 	/* Add ETS REC TLV */
823 	buf = tlv->tlvinfo;
824 	tlv->typelen = htons(typelen);
825 
826 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
827 		      ICE_IEEE_SUBTYPE_ETS_REC);
828 	tlv->ouisubtype = htonl(ouisubtype);
829 
830 	/* First octet of buf is reserved
831 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
832 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
833 	 * Octets 13 - 20 are TSA value - leave as zeros
834 	 */
835 	buf[5] = 0x64;
836 	offset += len + 2;
837 	tlv = (struct ice_lldp_org_tlv *)
838 		((char *)tlv + sizeof(tlv->typelen) + len);
839 
840 	/* Add PFC CFG TLV */
841 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
842 		   ICE_IEEE_PFC_TLV_LEN);
843 	tlv->typelen = htons(typelen);
844 
845 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
846 		      ICE_IEEE_SUBTYPE_PFC_CFG);
847 	tlv->ouisubtype = htonl(ouisubtype);
848 
849 	/* Octet 1 left as all zeros - PFC disabled */
850 	buf[0] = 0x08;
851 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
852 	offset += len + 2;
853 
854 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
855 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
856 
857 	kfree(lldpmib);
858 }
859 
860 /**
861  * ice_link_event - process the link event
862  * @pf: PF that the link event is associated with
863  * @pi: port_info for the port that the link event is associated with
864  * @link_up: true if the physical link is up and false if it is down
865  * @link_speed: current link speed received from the link event
866  *
867  * Returns 0 on success and negative on failure
868  */
869 static int
870 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
871 	       u16 link_speed)
872 {
873 	struct device *dev = ice_pf_to_dev(pf);
874 	struct ice_phy_info *phy_info;
875 	enum ice_status status;
876 	struct ice_vsi *vsi;
877 	u16 old_link_speed;
878 	bool old_link;
879 
880 	phy_info = &pi->phy;
881 	phy_info->link_info_old = phy_info->link_info;
882 
883 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
884 	old_link_speed = phy_info->link_info_old.link_speed;
885 
886 	/* update the link info structures and re-enable link events,
887 	 * don't bail on failure due to other book keeping needed
888 	 */
889 	status = ice_update_link_info(pi);
890 	if (status)
891 		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
892 			pi->lport, ice_stat_str(status),
893 			ice_aq_str(pi->hw->adminq.sq_last_status));
894 
895 	/* Check if the link state is up after updating link info, and treat
896 	 * this event as an UP event since the link is actually UP now.
897 	 */
898 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
899 		link_up = true;
900 
901 	vsi = ice_get_main_vsi(pf);
902 	if (!vsi || !vsi->port_info)
903 		return -EINVAL;
904 
905 	/* turn off PHY if media was removed */
906 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
907 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
908 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
909 		ice_set_link(vsi, false);
910 	}
911 
912 	/* if the old link up/down and speed is the same as the new */
913 	if (link_up == old_link && link_speed == old_link_speed)
914 		return 0;
915 
916 	if (ice_is_dcb_active(pf)) {
917 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
918 			ice_dcb_rebuild(pf);
919 	} else {
920 		if (link_up)
921 			ice_set_dflt_mib(pf);
922 	}
923 	ice_vsi_link_event(vsi, link_up);
924 	ice_print_link_msg(vsi, link_up);
925 
926 	ice_vc_notify_link_state(pf);
927 
928 	return 0;
929 }
930 
931 /**
932  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
933  * @pf: board private structure
934  */
935 static void ice_watchdog_subtask(struct ice_pf *pf)
936 {
937 	int i;
938 
939 	/* if interface is down do nothing */
940 	if (test_bit(ICE_DOWN, pf->state) ||
941 	    test_bit(ICE_CFG_BUSY, pf->state))
942 		return;
943 
944 	/* make sure we don't do these things too often */
945 	if (time_before(jiffies,
946 			pf->serv_tmr_prev + pf->serv_tmr_period))
947 		return;
948 
949 	pf->serv_tmr_prev = jiffies;
950 
951 	/* Update the stats for active netdevs so the network stack
952 	 * can look at updated numbers whenever it cares to
953 	 */
954 	ice_update_pf_stats(pf);
955 	ice_for_each_vsi(pf, i)
956 		if (pf->vsi[i] && pf->vsi[i]->netdev)
957 			ice_update_vsi_stats(pf->vsi[i]);
958 }
959 
960 /**
961  * ice_init_link_events - enable/initialize link events
962  * @pi: pointer to the port_info instance
963  *
964  * Returns -EIO on failure, 0 on success
965  */
966 static int ice_init_link_events(struct ice_port_info *pi)
967 {
968 	u16 mask;
969 
970 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
971 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
972 
973 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
974 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
975 			pi->lport);
976 		return -EIO;
977 	}
978 
979 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
980 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
981 			pi->lport);
982 		return -EIO;
983 	}
984 
985 	return 0;
986 }
987 
988 /**
989  * ice_handle_link_event - handle link event via ARQ
990  * @pf: PF that the link event is associated with
991  * @event: event structure containing link status info
992  */
993 static int
994 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
995 {
996 	struct ice_aqc_get_link_status_data *link_data;
997 	struct ice_port_info *port_info;
998 	int status;
999 
1000 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1001 	port_info = pf->hw.port_info;
1002 	if (!port_info)
1003 		return -EINVAL;
1004 
1005 	status = ice_link_event(pf, port_info,
1006 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1007 				le16_to_cpu(link_data->link_speed));
1008 	if (status)
1009 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1010 			status);
1011 
1012 	return status;
1013 }
1014 
1015 enum ice_aq_task_state {
1016 	ICE_AQ_TASK_WAITING = 0,
1017 	ICE_AQ_TASK_COMPLETE,
1018 	ICE_AQ_TASK_CANCELED,
1019 };
1020 
1021 struct ice_aq_task {
1022 	struct hlist_node entry;
1023 
1024 	u16 opcode;
1025 	struct ice_rq_event_info *event;
1026 	enum ice_aq_task_state state;
1027 };
1028 
1029 /**
1030  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1031  * @pf: pointer to the PF private structure
1032  * @opcode: the opcode to wait for
1033  * @timeout: how long to wait, in jiffies
1034  * @event: storage for the event info
1035  *
1036  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1037  * current thread will be put to sleep until the specified event occurs or
1038  * until the given timeout is reached.
1039  *
1040  * To obtain only the descriptor contents, pass an event without an allocated
1041  * msg_buf. If the complete data buffer is desired, allocate the
1042  * event->msg_buf with enough space ahead of time.
1043  *
1044  * Returns: zero on success, or a negative error code on failure.
1045  */
1046 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1047 			  struct ice_rq_event_info *event)
1048 {
1049 	struct device *dev = ice_pf_to_dev(pf);
1050 	struct ice_aq_task *task;
1051 	unsigned long start;
1052 	long ret;
1053 	int err;
1054 
1055 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1056 	if (!task)
1057 		return -ENOMEM;
1058 
1059 	INIT_HLIST_NODE(&task->entry);
1060 	task->opcode = opcode;
1061 	task->event = event;
1062 	task->state = ICE_AQ_TASK_WAITING;
1063 
1064 	spin_lock_bh(&pf->aq_wait_lock);
1065 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1066 	spin_unlock_bh(&pf->aq_wait_lock);
1067 
1068 	start = jiffies;
1069 
1070 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1071 					       timeout);
1072 	switch (task->state) {
1073 	case ICE_AQ_TASK_WAITING:
1074 		err = ret < 0 ? ret : -ETIMEDOUT;
1075 		break;
1076 	case ICE_AQ_TASK_CANCELED:
1077 		err = ret < 0 ? ret : -ECANCELED;
1078 		break;
1079 	case ICE_AQ_TASK_COMPLETE:
1080 		err = ret < 0 ? ret : 0;
1081 		break;
1082 	default:
1083 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1084 		err = -EINVAL;
1085 		break;
1086 	}
1087 
1088 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1089 		jiffies_to_msecs(jiffies - start),
1090 		jiffies_to_msecs(timeout),
1091 		opcode);
1092 
1093 	spin_lock_bh(&pf->aq_wait_lock);
1094 	hlist_del(&task->entry);
1095 	spin_unlock_bh(&pf->aq_wait_lock);
1096 	kfree(task);
1097 
1098 	return err;
1099 }
1100 
1101 /**
1102  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1103  * @pf: pointer to the PF private structure
1104  * @opcode: the opcode of the event
1105  * @event: the event to check
1106  *
1107  * Loops over the current list of pending threads waiting for an AdminQ event.
1108  * For each matching task, copy the contents of the event into the task
1109  * structure and wake up the thread.
1110  *
1111  * If multiple threads wait for the same opcode, they will all be woken up.
1112  *
1113  * Note that event->msg_buf will only be duplicated if the event has a buffer
1114  * with enough space already allocated. Otherwise, only the descriptor and
1115  * message length will be copied.
1116  *
1117  * Returns: true if an event was found, false otherwise
1118  */
1119 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1120 				struct ice_rq_event_info *event)
1121 {
1122 	struct ice_aq_task *task;
1123 	bool found = false;
1124 
1125 	spin_lock_bh(&pf->aq_wait_lock);
1126 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1127 		if (task->state || task->opcode != opcode)
1128 			continue;
1129 
1130 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1131 		task->event->msg_len = event->msg_len;
1132 
1133 		/* Only copy the data buffer if a destination was set */
1134 		if (task->event->msg_buf &&
1135 		    task->event->buf_len > event->buf_len) {
1136 			memcpy(task->event->msg_buf, event->msg_buf,
1137 			       event->buf_len);
1138 			task->event->buf_len = event->buf_len;
1139 		}
1140 
1141 		task->state = ICE_AQ_TASK_COMPLETE;
1142 		found = true;
1143 	}
1144 	spin_unlock_bh(&pf->aq_wait_lock);
1145 
1146 	if (found)
1147 		wake_up(&pf->aq_wait_queue);
1148 }
1149 
1150 /**
1151  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1152  * @pf: the PF private structure
1153  *
1154  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1155  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1156  */
1157 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1158 {
1159 	struct ice_aq_task *task;
1160 
1161 	spin_lock_bh(&pf->aq_wait_lock);
1162 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1163 		task->state = ICE_AQ_TASK_CANCELED;
1164 	spin_unlock_bh(&pf->aq_wait_lock);
1165 
1166 	wake_up(&pf->aq_wait_queue);
1167 }
1168 
1169 /**
1170  * __ice_clean_ctrlq - helper function to clean controlq rings
1171  * @pf: ptr to struct ice_pf
1172  * @q_type: specific Control queue type
1173  */
1174 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1175 {
1176 	struct device *dev = ice_pf_to_dev(pf);
1177 	struct ice_rq_event_info event;
1178 	struct ice_hw *hw = &pf->hw;
1179 	struct ice_ctl_q_info *cq;
1180 	u16 pending, i = 0;
1181 	const char *qtype;
1182 	u32 oldval, val;
1183 
1184 	/* Do not clean control queue if/when PF reset fails */
1185 	if (test_bit(ICE_RESET_FAILED, pf->state))
1186 		return 0;
1187 
1188 	switch (q_type) {
1189 	case ICE_CTL_Q_ADMIN:
1190 		cq = &hw->adminq;
1191 		qtype = "Admin";
1192 		break;
1193 	case ICE_CTL_Q_MAILBOX:
1194 		cq = &hw->mailboxq;
1195 		qtype = "Mailbox";
1196 		/* we are going to try to detect a malicious VF, so set the
1197 		 * state to begin detection
1198 		 */
1199 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1200 		break;
1201 	default:
1202 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1203 		return 0;
1204 	}
1205 
1206 	/* check for error indications - PF_xx_AxQLEN register layout for
1207 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1208 	 */
1209 	val = rd32(hw, cq->rq.len);
1210 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1211 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1212 		oldval = val;
1213 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1214 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1215 				qtype);
1216 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1217 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1218 				qtype);
1219 		}
1220 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1221 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1222 				qtype);
1223 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1224 			 PF_FW_ARQLEN_ARQCRIT_M);
1225 		if (oldval != val)
1226 			wr32(hw, cq->rq.len, val);
1227 	}
1228 
1229 	val = rd32(hw, cq->sq.len);
1230 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1231 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1232 		oldval = val;
1233 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1234 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1235 				qtype);
1236 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1237 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1238 				qtype);
1239 		}
1240 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1241 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1242 				qtype);
1243 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1244 			 PF_FW_ATQLEN_ATQCRIT_M);
1245 		if (oldval != val)
1246 			wr32(hw, cq->sq.len, val);
1247 	}
1248 
1249 	event.buf_len = cq->rq_buf_size;
1250 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1251 	if (!event.msg_buf)
1252 		return 0;
1253 
1254 	do {
1255 		enum ice_status ret;
1256 		u16 opcode;
1257 
1258 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1259 		if (ret == ICE_ERR_AQ_NO_WORK)
1260 			break;
1261 		if (ret) {
1262 			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1263 				ice_stat_str(ret));
1264 			break;
1265 		}
1266 
1267 		opcode = le16_to_cpu(event.desc.opcode);
1268 
1269 		/* Notify any thread that might be waiting for this event */
1270 		ice_aq_check_events(pf, opcode, &event);
1271 
1272 		switch (opcode) {
1273 		case ice_aqc_opc_get_link_status:
1274 			if (ice_handle_link_event(pf, &event))
1275 				dev_err(dev, "Could not handle link event\n");
1276 			break;
1277 		case ice_aqc_opc_event_lan_overflow:
1278 			ice_vf_lan_overflow_event(pf, &event);
1279 			break;
1280 		case ice_mbx_opc_send_msg_to_pf:
1281 			if (!ice_is_malicious_vf(pf, &event, i, pending))
1282 				ice_vc_process_vf_msg(pf, &event);
1283 			break;
1284 		case ice_aqc_opc_fw_logging:
1285 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1286 			break;
1287 		case ice_aqc_opc_lldp_set_mib_change:
1288 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1289 			break;
1290 		default:
1291 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1292 				qtype, opcode);
1293 			break;
1294 		}
1295 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1296 
1297 	kfree(event.msg_buf);
1298 
1299 	return pending && (i == ICE_DFLT_IRQ_WORK);
1300 }
1301 
1302 /**
1303  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1304  * @hw: pointer to hardware info
1305  * @cq: control queue information
1306  *
1307  * returns true if there are pending messages in a queue, false if there aren't
1308  */
1309 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1310 {
1311 	u16 ntu;
1312 
1313 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1314 	return cq->rq.next_to_clean != ntu;
1315 }
1316 
1317 /**
1318  * ice_clean_adminq_subtask - clean the AdminQ rings
1319  * @pf: board private structure
1320  */
1321 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1322 {
1323 	struct ice_hw *hw = &pf->hw;
1324 
1325 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1326 		return;
1327 
1328 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1329 		return;
1330 
1331 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1332 
1333 	/* There might be a situation where new messages arrive to a control
1334 	 * queue between processing the last message and clearing the
1335 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1336 	 * ice_ctrlq_pending) and process new messages if any.
1337 	 */
1338 	if (ice_ctrlq_pending(hw, &hw->adminq))
1339 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1340 
1341 	ice_flush(hw);
1342 }
1343 
1344 /**
1345  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1346  * @pf: board private structure
1347  */
1348 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1349 {
1350 	struct ice_hw *hw = &pf->hw;
1351 
1352 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1353 		return;
1354 
1355 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1356 		return;
1357 
1358 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1359 
1360 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1361 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1362 
1363 	ice_flush(hw);
1364 }
1365 
1366 /**
1367  * ice_service_task_schedule - schedule the service task to wake up
1368  * @pf: board private structure
1369  *
1370  * If not already scheduled, this puts the task into the work queue.
1371  */
1372 void ice_service_task_schedule(struct ice_pf *pf)
1373 {
1374 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1375 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1376 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1377 		queue_work(ice_wq, &pf->serv_task);
1378 }
1379 
1380 /**
1381  * ice_service_task_complete - finish up the service task
1382  * @pf: board private structure
1383  */
1384 static void ice_service_task_complete(struct ice_pf *pf)
1385 {
1386 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1387 
1388 	/* force memory (pf->state) to sync before next service task */
1389 	smp_mb__before_atomic();
1390 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1391 }
1392 
1393 /**
1394  * ice_service_task_stop - stop service task and cancel works
1395  * @pf: board private structure
1396  *
1397  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1398  * 1 otherwise.
1399  */
1400 static int ice_service_task_stop(struct ice_pf *pf)
1401 {
1402 	int ret;
1403 
1404 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1405 
1406 	if (pf->serv_tmr.function)
1407 		del_timer_sync(&pf->serv_tmr);
1408 	if (pf->serv_task.func)
1409 		cancel_work_sync(&pf->serv_task);
1410 
1411 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1412 	return ret;
1413 }
1414 
1415 /**
1416  * ice_service_task_restart - restart service task and schedule works
1417  * @pf: board private structure
1418  *
1419  * This function is needed for suspend and resume works (e.g WoL scenario)
1420  */
1421 static void ice_service_task_restart(struct ice_pf *pf)
1422 {
1423 	clear_bit(ICE_SERVICE_DIS, pf->state);
1424 	ice_service_task_schedule(pf);
1425 }
1426 
1427 /**
1428  * ice_service_timer - timer callback to schedule service task
1429  * @t: pointer to timer_list
1430  */
1431 static void ice_service_timer(struct timer_list *t)
1432 {
1433 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1434 
1435 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1436 	ice_service_task_schedule(pf);
1437 }
1438 
1439 /**
1440  * ice_handle_mdd_event - handle malicious driver detect event
1441  * @pf: pointer to the PF structure
1442  *
1443  * Called from service task. OICR interrupt handler indicates MDD event.
1444  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1445  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1446  * disable the queue, the PF can be configured to reset the VF using ethtool
1447  * private flag mdd-auto-reset-vf.
1448  */
1449 static void ice_handle_mdd_event(struct ice_pf *pf)
1450 {
1451 	struct device *dev = ice_pf_to_dev(pf);
1452 	struct ice_hw *hw = &pf->hw;
1453 	unsigned int i;
1454 	u32 reg;
1455 
1456 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1457 		/* Since the VF MDD event logging is rate limited, check if
1458 		 * there are pending MDD events.
1459 		 */
1460 		ice_print_vfs_mdd_events(pf);
1461 		return;
1462 	}
1463 
1464 	/* find what triggered an MDD event */
1465 	reg = rd32(hw, GL_MDET_TX_PQM);
1466 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1467 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1468 				GL_MDET_TX_PQM_PF_NUM_S;
1469 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1470 				GL_MDET_TX_PQM_VF_NUM_S;
1471 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1472 				GL_MDET_TX_PQM_MAL_TYPE_S;
1473 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1474 				GL_MDET_TX_PQM_QNUM_S);
1475 
1476 		if (netif_msg_tx_err(pf))
1477 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1478 				 event, queue, pf_num, vf_num);
1479 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1480 	}
1481 
1482 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1483 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1484 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1485 				GL_MDET_TX_TCLAN_PF_NUM_S;
1486 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1487 				GL_MDET_TX_TCLAN_VF_NUM_S;
1488 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1489 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1490 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1491 				GL_MDET_TX_TCLAN_QNUM_S);
1492 
1493 		if (netif_msg_tx_err(pf))
1494 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1495 				 event, queue, pf_num, vf_num);
1496 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1497 	}
1498 
1499 	reg = rd32(hw, GL_MDET_RX);
1500 	if (reg & GL_MDET_RX_VALID_M) {
1501 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1502 				GL_MDET_RX_PF_NUM_S;
1503 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1504 				GL_MDET_RX_VF_NUM_S;
1505 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1506 				GL_MDET_RX_MAL_TYPE_S;
1507 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1508 				GL_MDET_RX_QNUM_S);
1509 
1510 		if (netif_msg_rx_err(pf))
1511 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1512 				 event, queue, pf_num, vf_num);
1513 		wr32(hw, GL_MDET_RX, 0xffffffff);
1514 	}
1515 
1516 	/* check to see if this PF caused an MDD event */
1517 	reg = rd32(hw, PF_MDET_TX_PQM);
1518 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1519 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1520 		if (netif_msg_tx_err(pf))
1521 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1522 	}
1523 
1524 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1525 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1526 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1527 		if (netif_msg_tx_err(pf))
1528 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1529 	}
1530 
1531 	reg = rd32(hw, PF_MDET_RX);
1532 	if (reg & PF_MDET_RX_VALID_M) {
1533 		wr32(hw, PF_MDET_RX, 0xFFFF);
1534 		if (netif_msg_rx_err(pf))
1535 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1536 	}
1537 
1538 	/* Check to see if one of the VFs caused an MDD event, and then
1539 	 * increment counters and set print pending
1540 	 */
1541 	ice_for_each_vf(pf, i) {
1542 		struct ice_vf *vf = &pf->vf[i];
1543 
1544 		reg = rd32(hw, VP_MDET_TX_PQM(i));
1545 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1546 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1547 			vf->mdd_tx_events.count++;
1548 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1549 			if (netif_msg_tx_err(pf))
1550 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1551 					 i);
1552 		}
1553 
1554 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1555 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1556 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1557 			vf->mdd_tx_events.count++;
1558 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1559 			if (netif_msg_tx_err(pf))
1560 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1561 					 i);
1562 		}
1563 
1564 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1565 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1566 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1567 			vf->mdd_tx_events.count++;
1568 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1569 			if (netif_msg_tx_err(pf))
1570 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1571 					 i);
1572 		}
1573 
1574 		reg = rd32(hw, VP_MDET_RX(i));
1575 		if (reg & VP_MDET_RX_VALID_M) {
1576 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1577 			vf->mdd_rx_events.count++;
1578 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1579 			if (netif_msg_rx_err(pf))
1580 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1581 					 i);
1582 
1583 			/* Since the queue is disabled on VF Rx MDD events, the
1584 			 * PF can be configured to reset the VF through ethtool
1585 			 * private flag mdd-auto-reset-vf.
1586 			 */
1587 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1588 				/* VF MDD event counters will be cleared by
1589 				 * reset, so print the event prior to reset.
1590 				 */
1591 				ice_print_vf_rx_mdd_event(vf);
1592 				ice_reset_vf(&pf->vf[i], false);
1593 			}
1594 		}
1595 	}
1596 
1597 	ice_print_vfs_mdd_events(pf);
1598 }
1599 
1600 /**
1601  * ice_force_phys_link_state - Force the physical link state
1602  * @vsi: VSI to force the physical link state to up/down
1603  * @link_up: true/false indicates to set the physical link to up/down
1604  *
1605  * Force the physical link state by getting the current PHY capabilities from
1606  * hardware and setting the PHY config based on the determined capabilities. If
1607  * link changes a link event will be triggered because both the Enable Automatic
1608  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1609  *
1610  * Returns 0 on success, negative on failure
1611  */
1612 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1613 {
1614 	struct ice_aqc_get_phy_caps_data *pcaps;
1615 	struct ice_aqc_set_phy_cfg_data *cfg;
1616 	struct ice_port_info *pi;
1617 	struct device *dev;
1618 	int retcode;
1619 
1620 	if (!vsi || !vsi->port_info || !vsi->back)
1621 		return -EINVAL;
1622 	if (vsi->type != ICE_VSI_PF)
1623 		return 0;
1624 
1625 	dev = ice_pf_to_dev(vsi->back);
1626 
1627 	pi = vsi->port_info;
1628 
1629 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1630 	if (!pcaps)
1631 		return -ENOMEM;
1632 
1633 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1634 				      NULL);
1635 	if (retcode) {
1636 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1637 			vsi->vsi_num, retcode);
1638 		retcode = -EIO;
1639 		goto out;
1640 	}
1641 
1642 	/* No change in link */
1643 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1644 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1645 		goto out;
1646 
1647 	/* Use the current user PHY configuration. The current user PHY
1648 	 * configuration is initialized during probe from PHY capabilities
1649 	 * software mode, and updated on set PHY configuration.
1650 	 */
1651 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1652 	if (!cfg) {
1653 		retcode = -ENOMEM;
1654 		goto out;
1655 	}
1656 
1657 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1658 	if (link_up)
1659 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1660 	else
1661 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1662 
1663 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1664 	if (retcode) {
1665 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1666 			vsi->vsi_num, retcode);
1667 		retcode = -EIO;
1668 	}
1669 
1670 	kfree(cfg);
1671 out:
1672 	kfree(pcaps);
1673 	return retcode;
1674 }
1675 
1676 /**
1677  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1678  * @pi: port info structure
1679  *
1680  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1681  */
1682 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1683 {
1684 	struct ice_aqc_get_phy_caps_data *pcaps;
1685 	struct ice_pf *pf = pi->hw->back;
1686 	enum ice_status status;
1687 	int err = 0;
1688 
1689 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1690 	if (!pcaps)
1691 		return -ENOMEM;
1692 
1693 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1694 				     NULL);
1695 
1696 	if (status) {
1697 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1698 		err = -EIO;
1699 		goto out;
1700 	}
1701 
1702 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1703 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1704 
1705 out:
1706 	kfree(pcaps);
1707 	return err;
1708 }
1709 
1710 /**
1711  * ice_init_link_dflt_override - Initialize link default override
1712  * @pi: port info structure
1713  *
1714  * Initialize link default override and PHY total port shutdown during probe
1715  */
1716 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1717 {
1718 	struct ice_link_default_override_tlv *ldo;
1719 	struct ice_pf *pf = pi->hw->back;
1720 
1721 	ldo = &pf->link_dflt_override;
1722 	if (ice_get_link_default_override(ldo, pi))
1723 		return;
1724 
1725 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1726 		return;
1727 
1728 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1729 	 * ethtool private flag) for ports with Port Disable bit set.
1730 	 */
1731 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1732 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1733 }
1734 
1735 /**
1736  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1737  * @pi: port info structure
1738  *
1739  * If default override is enabled, initialize the user PHY cfg speed and FEC
1740  * settings using the default override mask from the NVM.
1741  *
1742  * The PHY should only be configured with the default override settings the
1743  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1744  * is used to indicate that the user PHY cfg default override is initialized
1745  * and the PHY has not been configured with the default override settings. The
1746  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1747  * configured.
1748  *
1749  * This function should be called only if the FW doesn't support default
1750  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1751  */
1752 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1753 {
1754 	struct ice_link_default_override_tlv *ldo;
1755 	struct ice_aqc_set_phy_cfg_data *cfg;
1756 	struct ice_phy_info *phy = &pi->phy;
1757 	struct ice_pf *pf = pi->hw->back;
1758 
1759 	ldo = &pf->link_dflt_override;
1760 
1761 	/* If link default override is enabled, use to mask NVM PHY capabilities
1762 	 * for speed and FEC default configuration.
1763 	 */
1764 	cfg = &phy->curr_user_phy_cfg;
1765 
1766 	if (ldo->phy_type_low || ldo->phy_type_high) {
1767 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1768 				    cpu_to_le64(ldo->phy_type_low);
1769 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1770 				     cpu_to_le64(ldo->phy_type_high);
1771 	}
1772 	cfg->link_fec_opt = ldo->fec_options;
1773 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1774 
1775 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1776 }
1777 
1778 /**
1779  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1780  * @pi: port info structure
1781  *
1782  * Initialize the current user PHY configuration, speed, FEC, and FC requested
1783  * mode to default. The PHY defaults are from get PHY capabilities topology
1784  * with media so call when media is first available. An error is returned if
1785  * called when media is not available. The PHY initialization completed state is
1786  * set here.
1787  *
1788  * These configurations are used when setting PHY
1789  * configuration. The user PHY configuration is updated on set PHY
1790  * configuration. Returns 0 on success, negative on failure
1791  */
1792 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1793 {
1794 	struct ice_aqc_get_phy_caps_data *pcaps;
1795 	struct ice_phy_info *phy = &pi->phy;
1796 	struct ice_pf *pf = pi->hw->back;
1797 	enum ice_status status;
1798 	int err = 0;
1799 
1800 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1801 		return -EIO;
1802 
1803 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1804 	if (!pcaps)
1805 		return -ENOMEM;
1806 
1807 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1808 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1809 					     pcaps, NULL);
1810 	else
1811 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1812 					     pcaps, NULL);
1813 	if (status) {
1814 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1815 		err = -EIO;
1816 		goto err_out;
1817 	}
1818 
1819 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1820 
1821 	/* check if lenient mode is supported and enabled */
1822 	if (ice_fw_supports_link_override(pi->hw) &&
1823 	    !(pcaps->module_compliance_enforcement &
1824 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1825 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1826 
1827 		/* if the FW supports default PHY configuration mode, then the driver
1828 		 * does not have to apply link override settings. If not,
1829 		 * initialize user PHY configuration with link override values
1830 		 */
1831 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1832 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1833 			ice_init_phy_cfg_dflt_override(pi);
1834 			goto out;
1835 		}
1836 	}
1837 
1838 	/* if link default override is not enabled, set user flow control and
1839 	 * FEC settings based on what get_phy_caps returned
1840 	 */
1841 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1842 						      pcaps->link_fec_options);
1843 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1844 
1845 out:
1846 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1847 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1848 err_out:
1849 	kfree(pcaps);
1850 	return err;
1851 }
1852 
1853 /**
1854  * ice_configure_phy - configure PHY
1855  * @vsi: VSI of PHY
1856  *
1857  * Set the PHY configuration. If the current PHY configuration is the same as
1858  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1859  * configure the based get PHY capabilities for topology with media.
1860  */
1861 static int ice_configure_phy(struct ice_vsi *vsi)
1862 {
1863 	struct device *dev = ice_pf_to_dev(vsi->back);
1864 	struct ice_port_info *pi = vsi->port_info;
1865 	struct ice_aqc_get_phy_caps_data *pcaps;
1866 	struct ice_aqc_set_phy_cfg_data *cfg;
1867 	struct ice_phy_info *phy = &pi->phy;
1868 	struct ice_pf *pf = vsi->back;
1869 	enum ice_status status;
1870 	int err = 0;
1871 
1872 	/* Ensure we have media as we cannot configure a medialess port */
1873 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1874 		return -EPERM;
1875 
1876 	ice_print_topo_conflict(vsi);
1877 
1878 	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1879 		return -EPERM;
1880 
1881 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1882 		return ice_force_phys_link_state(vsi, true);
1883 
1884 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1885 	if (!pcaps)
1886 		return -ENOMEM;
1887 
1888 	/* Get current PHY config */
1889 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1890 				     NULL);
1891 	if (status) {
1892 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1893 			vsi->vsi_num, ice_stat_str(status));
1894 		err = -EIO;
1895 		goto done;
1896 	}
1897 
1898 	/* If PHY enable link is configured and configuration has not changed,
1899 	 * there's nothing to do
1900 	 */
1901 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1902 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1903 		goto done;
1904 
1905 	/* Use PHY topology as baseline for configuration */
1906 	memset(pcaps, 0, sizeof(*pcaps));
1907 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1908 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1909 					     pcaps, NULL);
1910 	else
1911 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1912 					     pcaps, NULL);
1913 	if (status) {
1914 		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
1915 			vsi->vsi_num, ice_stat_str(status));
1916 		err = -EIO;
1917 		goto done;
1918 	}
1919 
1920 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1921 	if (!cfg) {
1922 		err = -ENOMEM;
1923 		goto done;
1924 	}
1925 
1926 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
1927 
1928 	/* Speed - If default override pending, use curr_user_phy_cfg set in
1929 	 * ice_init_phy_user_cfg_ldo.
1930 	 */
1931 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
1932 			       vsi->back->state)) {
1933 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
1934 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
1935 	} else {
1936 		u64 phy_low = 0, phy_high = 0;
1937 
1938 		ice_update_phy_type(&phy_low, &phy_high,
1939 				    pi->phy.curr_user_speed_req);
1940 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
1941 		cfg->phy_type_high = pcaps->phy_type_high &
1942 				     cpu_to_le64(phy_high);
1943 	}
1944 
1945 	/* Can't provide what was requested; use PHY capabilities */
1946 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
1947 		cfg->phy_type_low = pcaps->phy_type_low;
1948 		cfg->phy_type_high = pcaps->phy_type_high;
1949 	}
1950 
1951 	/* FEC */
1952 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
1953 
1954 	/* Can't provide what was requested; use PHY capabilities */
1955 	if (cfg->link_fec_opt !=
1956 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
1957 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
1958 		cfg->link_fec_opt = pcaps->link_fec_options;
1959 	}
1960 
1961 	/* Flow Control - always supported; no need to check against
1962 	 * capabilities
1963 	 */
1964 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
1965 
1966 	/* Enable link and link update */
1967 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
1968 
1969 	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
1970 	if (status) {
1971 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
1972 			vsi->vsi_num, ice_stat_str(status));
1973 		err = -EIO;
1974 	}
1975 
1976 	kfree(cfg);
1977 done:
1978 	kfree(pcaps);
1979 	return err;
1980 }
1981 
1982 /**
1983  * ice_check_media_subtask - Check for media
1984  * @pf: pointer to PF struct
1985  *
1986  * If media is available, then initialize PHY user configuration if it is not
1987  * been, and configure the PHY if the interface is up.
1988  */
1989 static void ice_check_media_subtask(struct ice_pf *pf)
1990 {
1991 	struct ice_port_info *pi;
1992 	struct ice_vsi *vsi;
1993 	int err;
1994 
1995 	/* No need to check for media if it's already present */
1996 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
1997 		return;
1998 
1999 	vsi = ice_get_main_vsi(pf);
2000 	if (!vsi)
2001 		return;
2002 
2003 	/* Refresh link info and check if media is present */
2004 	pi = vsi->port_info;
2005 	err = ice_update_link_info(pi);
2006 	if (err)
2007 		return;
2008 
2009 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2010 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2011 			ice_init_phy_user_cfg(pi);
2012 
2013 		/* PHY settings are reset on media insertion, reconfigure
2014 		 * PHY to preserve settings.
2015 		 */
2016 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2017 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2018 			return;
2019 
2020 		err = ice_configure_phy(vsi);
2021 		if (!err)
2022 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2023 
2024 		/* A Link Status Event will be generated; the event handler
2025 		 * will complete bringing the interface up
2026 		 */
2027 	}
2028 }
2029 
2030 /**
2031  * ice_service_task - manage and run subtasks
2032  * @work: pointer to work_struct contained by the PF struct
2033  */
2034 static void ice_service_task(struct work_struct *work)
2035 {
2036 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2037 	unsigned long start_time = jiffies;
2038 
2039 	/* subtasks */
2040 
2041 	/* process reset requests first */
2042 	ice_reset_subtask(pf);
2043 
2044 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2045 	if (ice_is_reset_in_progress(pf->state) ||
2046 	    test_bit(ICE_SUSPENDED, pf->state) ||
2047 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2048 		ice_service_task_complete(pf);
2049 		return;
2050 	}
2051 
2052 	ice_clean_adminq_subtask(pf);
2053 	ice_check_media_subtask(pf);
2054 	ice_check_for_hang_subtask(pf);
2055 	ice_sync_fltr_subtask(pf);
2056 	ice_handle_mdd_event(pf);
2057 	ice_watchdog_subtask(pf);
2058 
2059 	if (ice_is_safe_mode(pf)) {
2060 		ice_service_task_complete(pf);
2061 		return;
2062 	}
2063 
2064 	ice_process_vflr_event(pf);
2065 	ice_clean_mailboxq_subtask(pf);
2066 	ice_sync_arfs_fltrs(pf);
2067 	ice_flush_fdir_ctx(pf);
2068 
2069 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2070 	ice_service_task_complete(pf);
2071 
2072 	/* If the tasks have taken longer than one service timer period
2073 	 * or there is more work to be done, reset the service timer to
2074 	 * schedule the service task now.
2075 	 */
2076 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2077 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2078 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2079 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2080 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2081 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2082 		mod_timer(&pf->serv_tmr, jiffies);
2083 }
2084 
2085 /**
2086  * ice_set_ctrlq_len - helper function to set controlq length
2087  * @hw: pointer to the HW instance
2088  */
2089 static void ice_set_ctrlq_len(struct ice_hw *hw)
2090 {
2091 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2092 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2093 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2094 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2095 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2096 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2097 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2098 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2099 }
2100 
2101 /**
2102  * ice_schedule_reset - schedule a reset
2103  * @pf: board private structure
2104  * @reset: reset being requested
2105  */
2106 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2107 {
2108 	struct device *dev = ice_pf_to_dev(pf);
2109 
2110 	/* bail out if earlier reset has failed */
2111 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2112 		dev_dbg(dev, "earlier reset has failed\n");
2113 		return -EIO;
2114 	}
2115 	/* bail if reset/recovery already in progress */
2116 	if (ice_is_reset_in_progress(pf->state)) {
2117 		dev_dbg(dev, "Reset already in progress\n");
2118 		return -EBUSY;
2119 	}
2120 
2121 	switch (reset) {
2122 	case ICE_RESET_PFR:
2123 		set_bit(ICE_PFR_REQ, pf->state);
2124 		break;
2125 	case ICE_RESET_CORER:
2126 		set_bit(ICE_CORER_REQ, pf->state);
2127 		break;
2128 	case ICE_RESET_GLOBR:
2129 		set_bit(ICE_GLOBR_REQ, pf->state);
2130 		break;
2131 	default:
2132 		return -EINVAL;
2133 	}
2134 
2135 	ice_service_task_schedule(pf);
2136 	return 0;
2137 }
2138 
2139 /**
2140  * ice_irq_affinity_notify - Callback for affinity changes
2141  * @notify: context as to what irq was changed
2142  * @mask: the new affinity mask
2143  *
2144  * This is a callback function used by the irq_set_affinity_notifier function
2145  * so that we may register to receive changes to the irq affinity masks.
2146  */
2147 static void
2148 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2149 			const cpumask_t *mask)
2150 {
2151 	struct ice_q_vector *q_vector =
2152 		container_of(notify, struct ice_q_vector, affinity_notify);
2153 
2154 	cpumask_copy(&q_vector->affinity_mask, mask);
2155 }
2156 
2157 /**
2158  * ice_irq_affinity_release - Callback for affinity notifier release
2159  * @ref: internal core kernel usage
2160  *
2161  * This is a callback function used by the irq_set_affinity_notifier function
2162  * to inform the current notification subscriber that they will no longer
2163  * receive notifications.
2164  */
2165 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2166 
2167 /**
2168  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2169  * @vsi: the VSI being configured
2170  */
2171 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2172 {
2173 	struct ice_hw *hw = &vsi->back->hw;
2174 	int i;
2175 
2176 	ice_for_each_q_vector(vsi, i)
2177 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2178 
2179 	ice_flush(hw);
2180 	return 0;
2181 }
2182 
2183 /**
2184  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2185  * @vsi: the VSI being configured
2186  * @basename: name for the vector
2187  */
2188 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2189 {
2190 	int q_vectors = vsi->num_q_vectors;
2191 	struct ice_pf *pf = vsi->back;
2192 	int base = vsi->base_vector;
2193 	struct device *dev;
2194 	int rx_int_idx = 0;
2195 	int tx_int_idx = 0;
2196 	int vector, err;
2197 	int irq_num;
2198 
2199 	dev = ice_pf_to_dev(pf);
2200 	for (vector = 0; vector < q_vectors; vector++) {
2201 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2202 
2203 		irq_num = pf->msix_entries[base + vector].vector;
2204 
2205 		if (q_vector->tx.ring && q_vector->rx.ring) {
2206 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2207 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2208 			tx_int_idx++;
2209 		} else if (q_vector->rx.ring) {
2210 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2211 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2212 		} else if (q_vector->tx.ring) {
2213 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2214 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2215 		} else {
2216 			/* skip this unused q_vector */
2217 			continue;
2218 		}
2219 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2220 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2221 					       IRQF_SHARED, q_vector->name,
2222 					       q_vector);
2223 		else
2224 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2225 					       0, q_vector->name, q_vector);
2226 		if (err) {
2227 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2228 				   err);
2229 			goto free_q_irqs;
2230 		}
2231 
2232 		/* register for affinity change notifications */
2233 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2234 			struct irq_affinity_notify *affinity_notify;
2235 
2236 			affinity_notify = &q_vector->affinity_notify;
2237 			affinity_notify->notify = ice_irq_affinity_notify;
2238 			affinity_notify->release = ice_irq_affinity_release;
2239 			irq_set_affinity_notifier(irq_num, affinity_notify);
2240 		}
2241 
2242 		/* assign the mask for this irq */
2243 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2244 	}
2245 
2246 	vsi->irqs_ready = true;
2247 	return 0;
2248 
2249 free_q_irqs:
2250 	while (vector) {
2251 		vector--;
2252 		irq_num = pf->msix_entries[base + vector].vector;
2253 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2254 			irq_set_affinity_notifier(irq_num, NULL);
2255 		irq_set_affinity_hint(irq_num, NULL);
2256 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2257 	}
2258 	return err;
2259 }
2260 
2261 /**
2262  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2263  * @vsi: VSI to setup Tx rings used by XDP
2264  *
2265  * Return 0 on success and negative value on error
2266  */
2267 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2268 {
2269 	struct device *dev = ice_pf_to_dev(vsi->back);
2270 	int i;
2271 
2272 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2273 		u16 xdp_q_idx = vsi->alloc_txq + i;
2274 		struct ice_ring *xdp_ring;
2275 
2276 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2277 
2278 		if (!xdp_ring)
2279 			goto free_xdp_rings;
2280 
2281 		xdp_ring->q_index = xdp_q_idx;
2282 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2283 		xdp_ring->ring_active = false;
2284 		xdp_ring->vsi = vsi;
2285 		xdp_ring->netdev = NULL;
2286 		xdp_ring->dev = dev;
2287 		xdp_ring->count = vsi->num_tx_desc;
2288 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2289 		if (ice_setup_tx_ring(xdp_ring))
2290 			goto free_xdp_rings;
2291 		ice_set_ring_xdp(xdp_ring);
2292 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2293 	}
2294 
2295 	return 0;
2296 
2297 free_xdp_rings:
2298 	for (; i >= 0; i--)
2299 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2300 			ice_free_tx_ring(vsi->xdp_rings[i]);
2301 	return -ENOMEM;
2302 }
2303 
2304 /**
2305  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2306  * @vsi: VSI to set the bpf prog on
2307  * @prog: the bpf prog pointer
2308  */
2309 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2310 {
2311 	struct bpf_prog *old_prog;
2312 	int i;
2313 
2314 	old_prog = xchg(&vsi->xdp_prog, prog);
2315 	if (old_prog)
2316 		bpf_prog_put(old_prog);
2317 
2318 	ice_for_each_rxq(vsi, i)
2319 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2320 }
2321 
2322 /**
2323  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2324  * @vsi: VSI to bring up Tx rings used by XDP
2325  * @prog: bpf program that will be assigned to VSI
2326  *
2327  * Return 0 on success and negative value on error
2328  */
2329 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2330 {
2331 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2332 	int xdp_rings_rem = vsi->num_xdp_txq;
2333 	struct ice_pf *pf = vsi->back;
2334 	struct ice_qs_cfg xdp_qs_cfg = {
2335 		.qs_mutex = &pf->avail_q_mutex,
2336 		.pf_map = pf->avail_txqs,
2337 		.pf_map_size = pf->max_pf_txqs,
2338 		.q_count = vsi->num_xdp_txq,
2339 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2340 		.vsi_map = vsi->txq_map,
2341 		.vsi_map_offset = vsi->alloc_txq,
2342 		.mapping_mode = ICE_VSI_MAP_CONTIG
2343 	};
2344 	enum ice_status status;
2345 	struct device *dev;
2346 	int i, v_idx;
2347 
2348 	dev = ice_pf_to_dev(pf);
2349 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2350 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2351 	if (!vsi->xdp_rings)
2352 		return -ENOMEM;
2353 
2354 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2355 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2356 		goto err_map_xdp;
2357 
2358 	if (ice_xdp_alloc_setup_rings(vsi))
2359 		goto clear_xdp_rings;
2360 
2361 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2362 	ice_for_each_q_vector(vsi, v_idx) {
2363 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2364 		int xdp_rings_per_v, q_id, q_base;
2365 
2366 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2367 					       vsi->num_q_vectors - v_idx);
2368 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2369 
2370 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2371 			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2372 
2373 			xdp_ring->q_vector = q_vector;
2374 			xdp_ring->next = q_vector->tx.ring;
2375 			q_vector->tx.ring = xdp_ring;
2376 		}
2377 		xdp_rings_rem -= xdp_rings_per_v;
2378 	}
2379 
2380 	/* omit the scheduler update if in reset path; XDP queues will be
2381 	 * taken into account at the end of ice_vsi_rebuild, where
2382 	 * ice_cfg_vsi_lan is being called
2383 	 */
2384 	if (ice_is_reset_in_progress(pf->state))
2385 		return 0;
2386 
2387 	/* tell the Tx scheduler that right now we have
2388 	 * additional queues
2389 	 */
2390 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2391 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2392 
2393 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2394 				 max_txqs);
2395 	if (status) {
2396 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2397 			ice_stat_str(status));
2398 		goto clear_xdp_rings;
2399 	}
2400 	ice_vsi_assign_bpf_prog(vsi, prog);
2401 
2402 	return 0;
2403 clear_xdp_rings:
2404 	for (i = 0; i < vsi->num_xdp_txq; i++)
2405 		if (vsi->xdp_rings[i]) {
2406 			kfree_rcu(vsi->xdp_rings[i], rcu);
2407 			vsi->xdp_rings[i] = NULL;
2408 		}
2409 
2410 err_map_xdp:
2411 	mutex_lock(&pf->avail_q_mutex);
2412 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2413 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2414 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2415 	}
2416 	mutex_unlock(&pf->avail_q_mutex);
2417 
2418 	devm_kfree(dev, vsi->xdp_rings);
2419 	return -ENOMEM;
2420 }
2421 
2422 /**
2423  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2424  * @vsi: VSI to remove XDP rings
2425  *
2426  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2427  * resources
2428  */
2429 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2430 {
2431 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2432 	struct ice_pf *pf = vsi->back;
2433 	int i, v_idx;
2434 
2435 	/* q_vectors are freed in reset path so there's no point in detaching
2436 	 * rings; in case of rebuild being triggered not from reset bits
2437 	 * in pf->state won't be set, so additionally check first q_vector
2438 	 * against NULL
2439 	 */
2440 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2441 		goto free_qmap;
2442 
2443 	ice_for_each_q_vector(vsi, v_idx) {
2444 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2445 		struct ice_ring *ring;
2446 
2447 		ice_for_each_ring(ring, q_vector->tx)
2448 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2449 				break;
2450 
2451 		/* restore the value of last node prior to XDP setup */
2452 		q_vector->tx.ring = ring;
2453 	}
2454 
2455 free_qmap:
2456 	mutex_lock(&pf->avail_q_mutex);
2457 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2458 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2459 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2460 	}
2461 	mutex_unlock(&pf->avail_q_mutex);
2462 
2463 	for (i = 0; i < vsi->num_xdp_txq; i++)
2464 		if (vsi->xdp_rings[i]) {
2465 			if (vsi->xdp_rings[i]->desc)
2466 				ice_free_tx_ring(vsi->xdp_rings[i]);
2467 			kfree_rcu(vsi->xdp_rings[i], rcu);
2468 			vsi->xdp_rings[i] = NULL;
2469 		}
2470 
2471 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2472 	vsi->xdp_rings = NULL;
2473 
2474 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2475 		return 0;
2476 
2477 	ice_vsi_assign_bpf_prog(vsi, NULL);
2478 
2479 	/* notify Tx scheduler that we destroyed XDP queues and bring
2480 	 * back the old number of child nodes
2481 	 */
2482 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2483 		max_txqs[i] = vsi->num_txq;
2484 
2485 	/* change number of XDP Tx queues to 0 */
2486 	vsi->num_xdp_txq = 0;
2487 
2488 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2489 			       max_txqs);
2490 }
2491 
2492 /**
2493  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2494  * @vsi: VSI to schedule napi on
2495  */
2496 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2497 {
2498 	int i;
2499 
2500 	ice_for_each_rxq(vsi, i) {
2501 		struct ice_ring *rx_ring = vsi->rx_rings[i];
2502 
2503 		if (rx_ring->xsk_pool)
2504 			napi_schedule(&rx_ring->q_vector->napi);
2505 	}
2506 }
2507 
2508 /**
2509  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2510  * @vsi: VSI to setup XDP for
2511  * @prog: XDP program
2512  * @extack: netlink extended ack
2513  */
2514 static int
2515 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2516 		   struct netlink_ext_ack *extack)
2517 {
2518 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2519 	bool if_running = netif_running(vsi->netdev);
2520 	int ret = 0, xdp_ring_err = 0;
2521 
2522 	if (frame_size > vsi->rx_buf_len) {
2523 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2524 		return -EOPNOTSUPP;
2525 	}
2526 
2527 	/* need to stop netdev while setting up the program for Rx rings */
2528 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2529 		ret = ice_down(vsi);
2530 		if (ret) {
2531 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2532 			return ret;
2533 		}
2534 	}
2535 
2536 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2537 		vsi->num_xdp_txq = vsi->alloc_rxq;
2538 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2539 		if (xdp_ring_err)
2540 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2541 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2542 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2543 		if (xdp_ring_err)
2544 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2545 	} else {
2546 		ice_vsi_assign_bpf_prog(vsi, prog);
2547 	}
2548 
2549 	if (if_running)
2550 		ret = ice_up(vsi);
2551 
2552 	if (!ret && prog)
2553 		ice_vsi_rx_napi_schedule(vsi);
2554 
2555 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2556 }
2557 
2558 /**
2559  * ice_xdp_safe_mode - XDP handler for safe mode
2560  * @dev: netdevice
2561  * @xdp: XDP command
2562  */
2563 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2564 			     struct netdev_bpf *xdp)
2565 {
2566 	NL_SET_ERR_MSG_MOD(xdp->extack,
2567 			   "Please provide working DDP firmware package in order to use XDP\n"
2568 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2569 	return -EOPNOTSUPP;
2570 }
2571 
2572 /**
2573  * ice_xdp - implements XDP handler
2574  * @dev: netdevice
2575  * @xdp: XDP command
2576  */
2577 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2578 {
2579 	struct ice_netdev_priv *np = netdev_priv(dev);
2580 	struct ice_vsi *vsi = np->vsi;
2581 
2582 	if (vsi->type != ICE_VSI_PF) {
2583 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2584 		return -EINVAL;
2585 	}
2586 
2587 	switch (xdp->command) {
2588 	case XDP_SETUP_PROG:
2589 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2590 	case XDP_SETUP_XSK_POOL:
2591 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2592 					  xdp->xsk.queue_id);
2593 	default:
2594 		return -EINVAL;
2595 	}
2596 }
2597 
2598 /**
2599  * ice_ena_misc_vector - enable the non-queue interrupts
2600  * @pf: board private structure
2601  */
2602 static void ice_ena_misc_vector(struct ice_pf *pf)
2603 {
2604 	struct ice_hw *hw = &pf->hw;
2605 	u32 val;
2606 
2607 	/* Disable anti-spoof detection interrupt to prevent spurious event
2608 	 * interrupts during a function reset. Anti-spoof functionally is
2609 	 * still supported.
2610 	 */
2611 	val = rd32(hw, GL_MDCK_TX_TDPU);
2612 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2613 	wr32(hw, GL_MDCK_TX_TDPU, val);
2614 
2615 	/* clear things first */
2616 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2617 	rd32(hw, PFINT_OICR);		/* read to clear */
2618 
2619 	val = (PFINT_OICR_ECC_ERR_M |
2620 	       PFINT_OICR_MAL_DETECT_M |
2621 	       PFINT_OICR_GRST_M |
2622 	       PFINT_OICR_PCI_EXCEPTION_M |
2623 	       PFINT_OICR_VFLR_M |
2624 	       PFINT_OICR_HMC_ERR_M |
2625 	       PFINT_OICR_PE_CRITERR_M);
2626 
2627 	wr32(hw, PFINT_OICR_ENA, val);
2628 
2629 	/* SW_ITR_IDX = 0, but don't change INTENA */
2630 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2631 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2632 }
2633 
2634 /**
2635  * ice_misc_intr - misc interrupt handler
2636  * @irq: interrupt number
2637  * @data: pointer to a q_vector
2638  */
2639 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2640 {
2641 	struct ice_pf *pf = (struct ice_pf *)data;
2642 	struct ice_hw *hw = &pf->hw;
2643 	irqreturn_t ret = IRQ_NONE;
2644 	struct device *dev;
2645 	u32 oicr, ena_mask;
2646 
2647 	dev = ice_pf_to_dev(pf);
2648 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2649 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2650 
2651 	oicr = rd32(hw, PFINT_OICR);
2652 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2653 
2654 	if (oicr & PFINT_OICR_SWINT_M) {
2655 		ena_mask &= ~PFINT_OICR_SWINT_M;
2656 		pf->sw_int_count++;
2657 	}
2658 
2659 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2660 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2661 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2662 	}
2663 	if (oicr & PFINT_OICR_VFLR_M) {
2664 		/* disable any further VFLR event notifications */
2665 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2666 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2667 
2668 			reg &= ~PFINT_OICR_VFLR_M;
2669 			wr32(hw, PFINT_OICR_ENA, reg);
2670 		} else {
2671 			ena_mask &= ~PFINT_OICR_VFLR_M;
2672 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2673 		}
2674 	}
2675 
2676 	if (oicr & PFINT_OICR_GRST_M) {
2677 		u32 reset;
2678 
2679 		/* we have a reset warning */
2680 		ena_mask &= ~PFINT_OICR_GRST_M;
2681 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2682 			GLGEN_RSTAT_RESET_TYPE_S;
2683 
2684 		if (reset == ICE_RESET_CORER)
2685 			pf->corer_count++;
2686 		else if (reset == ICE_RESET_GLOBR)
2687 			pf->globr_count++;
2688 		else if (reset == ICE_RESET_EMPR)
2689 			pf->empr_count++;
2690 		else
2691 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2692 
2693 		/* If a reset cycle isn't already in progress, we set a bit in
2694 		 * pf->state so that the service task can start a reset/rebuild.
2695 		 * We also make note of which reset happened so that peer
2696 		 * devices/drivers can be informed.
2697 		 */
2698 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2699 			if (reset == ICE_RESET_CORER)
2700 				set_bit(ICE_CORER_RECV, pf->state);
2701 			else if (reset == ICE_RESET_GLOBR)
2702 				set_bit(ICE_GLOBR_RECV, pf->state);
2703 			else
2704 				set_bit(ICE_EMPR_RECV, pf->state);
2705 
2706 			/* There are couple of different bits at play here.
2707 			 * hw->reset_ongoing indicates whether the hardware is
2708 			 * in reset. This is set to true when a reset interrupt
2709 			 * is received and set back to false after the driver
2710 			 * has determined that the hardware is out of reset.
2711 			 *
2712 			 * ICE_RESET_OICR_RECV in pf->state indicates
2713 			 * that a post reset rebuild is required before the
2714 			 * driver is operational again. This is set above.
2715 			 *
2716 			 * As this is the start of the reset/rebuild cycle, set
2717 			 * both to indicate that.
2718 			 */
2719 			hw->reset_ongoing = true;
2720 		}
2721 	}
2722 
2723 	if (oicr & PFINT_OICR_HMC_ERR_M) {
2724 		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
2725 		dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
2726 			rd32(hw, PFHMC_ERRORINFO),
2727 			rd32(hw, PFHMC_ERRORDATA));
2728 	}
2729 
2730 	/* Report any remaining unexpected interrupts */
2731 	oicr &= ena_mask;
2732 	if (oicr) {
2733 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2734 		/* If a critical error is pending there is no choice but to
2735 		 * reset the device.
2736 		 */
2737 		if (oicr & (PFINT_OICR_PE_CRITERR_M |
2738 			    PFINT_OICR_PCI_EXCEPTION_M |
2739 			    PFINT_OICR_ECC_ERR_M)) {
2740 			set_bit(ICE_PFR_REQ, pf->state);
2741 			ice_service_task_schedule(pf);
2742 		}
2743 	}
2744 	ret = IRQ_HANDLED;
2745 
2746 	ice_service_task_schedule(pf);
2747 	ice_irq_dynamic_ena(hw, NULL, NULL);
2748 
2749 	return ret;
2750 }
2751 
2752 /**
2753  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2754  * @hw: pointer to HW structure
2755  */
2756 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2757 {
2758 	/* disable Admin queue Interrupt causes */
2759 	wr32(hw, PFINT_FW_CTL,
2760 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2761 
2762 	/* disable Mailbox queue Interrupt causes */
2763 	wr32(hw, PFINT_MBX_CTL,
2764 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2765 
2766 	/* disable Control queue Interrupt causes */
2767 	wr32(hw, PFINT_OICR_CTL,
2768 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2769 
2770 	ice_flush(hw);
2771 }
2772 
2773 /**
2774  * ice_free_irq_msix_misc - Unroll misc vector setup
2775  * @pf: board private structure
2776  */
2777 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2778 {
2779 	struct ice_hw *hw = &pf->hw;
2780 
2781 	ice_dis_ctrlq_interrupts(hw);
2782 
2783 	/* disable OICR interrupt */
2784 	wr32(hw, PFINT_OICR_ENA, 0);
2785 	ice_flush(hw);
2786 
2787 	if (pf->msix_entries) {
2788 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2789 		devm_free_irq(ice_pf_to_dev(pf),
2790 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2791 	}
2792 
2793 	pf->num_avail_sw_msix += 1;
2794 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2795 }
2796 
2797 /**
2798  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2799  * @hw: pointer to HW structure
2800  * @reg_idx: HW vector index to associate the control queue interrupts with
2801  */
2802 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2803 {
2804 	u32 val;
2805 
2806 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2807 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2808 	wr32(hw, PFINT_OICR_CTL, val);
2809 
2810 	/* enable Admin queue Interrupt causes */
2811 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2812 	       PFINT_FW_CTL_CAUSE_ENA_M);
2813 	wr32(hw, PFINT_FW_CTL, val);
2814 
2815 	/* enable Mailbox queue Interrupt causes */
2816 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2817 	       PFINT_MBX_CTL_CAUSE_ENA_M);
2818 	wr32(hw, PFINT_MBX_CTL, val);
2819 
2820 	ice_flush(hw);
2821 }
2822 
2823 /**
2824  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2825  * @pf: board private structure
2826  *
2827  * This sets up the handler for MSIX 0, which is used to manage the
2828  * non-queue interrupts, e.g. AdminQ and errors. This is not used
2829  * when in MSI or Legacy interrupt mode.
2830  */
2831 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2832 {
2833 	struct device *dev = ice_pf_to_dev(pf);
2834 	struct ice_hw *hw = &pf->hw;
2835 	int oicr_idx, err = 0;
2836 
2837 	if (!pf->int_name[0])
2838 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2839 			 dev_driver_string(dev), dev_name(dev));
2840 
2841 	/* Do not request IRQ but do enable OICR interrupt since settings are
2842 	 * lost during reset. Note that this function is called only during
2843 	 * rebuild path and not while reset is in progress.
2844 	 */
2845 	if (ice_is_reset_in_progress(pf->state))
2846 		goto skip_req_irq;
2847 
2848 	/* reserve one vector in irq_tracker for misc interrupts */
2849 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2850 	if (oicr_idx < 0)
2851 		return oicr_idx;
2852 
2853 	pf->num_avail_sw_msix -= 1;
2854 	pf->oicr_idx = (u16)oicr_idx;
2855 
2856 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2857 			       ice_misc_intr, 0, pf->int_name, pf);
2858 	if (err) {
2859 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2860 			pf->int_name, err);
2861 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2862 		pf->num_avail_sw_msix += 1;
2863 		return err;
2864 	}
2865 
2866 skip_req_irq:
2867 	ice_ena_misc_vector(pf);
2868 
2869 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
2870 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2871 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2872 
2873 	ice_flush(hw);
2874 	ice_irq_dynamic_ena(hw, NULL, NULL);
2875 
2876 	return 0;
2877 }
2878 
2879 /**
2880  * ice_napi_add - register NAPI handler for the VSI
2881  * @vsi: VSI for which NAPI handler is to be registered
2882  *
2883  * This function is only called in the driver's load path. Registering the NAPI
2884  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
2885  * reset/rebuild, etc.)
2886  */
2887 static void ice_napi_add(struct ice_vsi *vsi)
2888 {
2889 	int v_idx;
2890 
2891 	if (!vsi->netdev)
2892 		return;
2893 
2894 	ice_for_each_q_vector(vsi, v_idx)
2895 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
2896 			       ice_napi_poll, NAPI_POLL_WEIGHT);
2897 }
2898 
2899 /**
2900  * ice_set_ops - set netdev and ethtools ops for the given netdev
2901  * @netdev: netdev instance
2902  */
2903 static void ice_set_ops(struct net_device *netdev)
2904 {
2905 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2906 
2907 	if (ice_is_safe_mode(pf)) {
2908 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
2909 		ice_set_ethtool_safe_mode_ops(netdev);
2910 		return;
2911 	}
2912 
2913 	netdev->netdev_ops = &ice_netdev_ops;
2914 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
2915 	ice_set_ethtool_ops(netdev);
2916 }
2917 
2918 /**
2919  * ice_set_netdev_features - set features for the given netdev
2920  * @netdev: netdev instance
2921  */
2922 static void ice_set_netdev_features(struct net_device *netdev)
2923 {
2924 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2925 	netdev_features_t csumo_features;
2926 	netdev_features_t vlano_features;
2927 	netdev_features_t dflt_features;
2928 	netdev_features_t tso_features;
2929 
2930 	if (ice_is_safe_mode(pf)) {
2931 		/* safe mode */
2932 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
2933 		netdev->hw_features = netdev->features;
2934 		return;
2935 	}
2936 
2937 	dflt_features = NETIF_F_SG	|
2938 			NETIF_F_HIGHDMA	|
2939 			NETIF_F_NTUPLE	|
2940 			NETIF_F_RXHASH;
2941 
2942 	csumo_features = NETIF_F_RXCSUM	  |
2943 			 NETIF_F_IP_CSUM  |
2944 			 NETIF_F_SCTP_CRC |
2945 			 NETIF_F_IPV6_CSUM;
2946 
2947 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2948 			 NETIF_F_HW_VLAN_CTAG_TX     |
2949 			 NETIF_F_HW_VLAN_CTAG_RX;
2950 
2951 	tso_features = NETIF_F_TSO			|
2952 		       NETIF_F_TSO_ECN			|
2953 		       NETIF_F_TSO6			|
2954 		       NETIF_F_GSO_GRE			|
2955 		       NETIF_F_GSO_UDP_TUNNEL		|
2956 		       NETIF_F_GSO_GRE_CSUM		|
2957 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
2958 		       NETIF_F_GSO_PARTIAL		|
2959 		       NETIF_F_GSO_IPXIP4		|
2960 		       NETIF_F_GSO_IPXIP6		|
2961 		       NETIF_F_GSO_UDP_L4;
2962 
2963 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
2964 					NETIF_F_GSO_GRE_CSUM;
2965 	/* set features that user can change */
2966 	netdev->hw_features = dflt_features | csumo_features |
2967 			      vlano_features | tso_features;
2968 
2969 	/* add support for HW_CSUM on packets with MPLS header */
2970 	netdev->mpls_features =  NETIF_F_HW_CSUM;
2971 
2972 	/* enable features */
2973 	netdev->features |= netdev->hw_features;
2974 	/* encap and VLAN devices inherit default, csumo and tso features */
2975 	netdev->hw_enc_features |= dflt_features | csumo_features |
2976 				   tso_features;
2977 	netdev->vlan_features |= dflt_features | csumo_features |
2978 				 tso_features;
2979 }
2980 
2981 /**
2982  * ice_cfg_netdev - Allocate, configure and register a netdev
2983  * @vsi: the VSI associated with the new netdev
2984  *
2985  * Returns 0 on success, negative value on failure
2986  */
2987 static int ice_cfg_netdev(struct ice_vsi *vsi)
2988 {
2989 	struct ice_pf *pf = vsi->back;
2990 	struct ice_netdev_priv *np;
2991 	struct net_device *netdev;
2992 	u8 mac_addr[ETH_ALEN];
2993 
2994 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
2995 				    vsi->alloc_rxq);
2996 	if (!netdev)
2997 		return -ENOMEM;
2998 
2999 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3000 	vsi->netdev = netdev;
3001 	np = netdev_priv(netdev);
3002 	np->vsi = vsi;
3003 
3004 	ice_set_netdev_features(netdev);
3005 
3006 	ice_set_ops(netdev);
3007 
3008 	if (vsi->type == ICE_VSI_PF) {
3009 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
3010 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3011 		ether_addr_copy(netdev->dev_addr, mac_addr);
3012 		ether_addr_copy(netdev->perm_addr, mac_addr);
3013 	}
3014 
3015 	netdev->priv_flags |= IFF_UNICAST_FLT;
3016 
3017 	/* Setup netdev TC information */
3018 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3019 
3020 	/* setup watchdog timeout value to be 5 second */
3021 	netdev->watchdog_timeo = 5 * HZ;
3022 
3023 	netdev->min_mtu = ETH_MIN_MTU;
3024 	netdev->max_mtu = ICE_MAX_MTU;
3025 
3026 	return 0;
3027 }
3028 
3029 /**
3030  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3031  * @lut: Lookup table
3032  * @rss_table_size: Lookup table size
3033  * @rss_size: Range of queue number for hashing
3034  */
3035 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3036 {
3037 	u16 i;
3038 
3039 	for (i = 0; i < rss_table_size; i++)
3040 		lut[i] = i % rss_size;
3041 }
3042 
3043 /**
3044  * ice_pf_vsi_setup - Set up a PF VSI
3045  * @pf: board private structure
3046  * @pi: pointer to the port_info instance
3047  *
3048  * Returns pointer to the successfully allocated VSI software struct
3049  * on success, otherwise returns NULL on failure.
3050  */
3051 static struct ice_vsi *
3052 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3053 {
3054 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3055 }
3056 
3057 /**
3058  * ice_ctrl_vsi_setup - Set up a control VSI
3059  * @pf: board private structure
3060  * @pi: pointer to the port_info instance
3061  *
3062  * Returns pointer to the successfully allocated VSI software struct
3063  * on success, otherwise returns NULL on failure.
3064  */
3065 static struct ice_vsi *
3066 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3067 {
3068 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3069 }
3070 
3071 /**
3072  * ice_lb_vsi_setup - Set up a loopback VSI
3073  * @pf: board private structure
3074  * @pi: pointer to the port_info instance
3075  *
3076  * Returns pointer to the successfully allocated VSI software struct
3077  * on success, otherwise returns NULL on failure.
3078  */
3079 struct ice_vsi *
3080 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3081 {
3082 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3083 }
3084 
3085 /**
3086  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3087  * @netdev: network interface to be adjusted
3088  * @proto: unused protocol
3089  * @vid: VLAN ID to be added
3090  *
3091  * net_device_ops implementation for adding VLAN IDs
3092  */
3093 static int
3094 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3095 		    u16 vid)
3096 {
3097 	struct ice_netdev_priv *np = netdev_priv(netdev);
3098 	struct ice_vsi *vsi = np->vsi;
3099 	int ret;
3100 
3101 	/* VLAN 0 is added by default during load/reset */
3102 	if (!vid)
3103 		return 0;
3104 
3105 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3106 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3107 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3108 		if (ret)
3109 			return ret;
3110 	}
3111 
3112 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3113 	 * packets aren't pruned by the device's internal switch on Rx
3114 	 */
3115 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3116 	if (!ret)
3117 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3118 
3119 	return ret;
3120 }
3121 
3122 /**
3123  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3124  * @netdev: network interface to be adjusted
3125  * @proto: unused protocol
3126  * @vid: VLAN ID to be removed
3127  *
3128  * net_device_ops implementation for removing VLAN IDs
3129  */
3130 static int
3131 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3132 		     u16 vid)
3133 {
3134 	struct ice_netdev_priv *np = netdev_priv(netdev);
3135 	struct ice_vsi *vsi = np->vsi;
3136 	int ret;
3137 
3138 	/* don't allow removal of VLAN 0 */
3139 	if (!vid)
3140 		return 0;
3141 
3142 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3143 	 * information
3144 	 */
3145 	ret = ice_vsi_kill_vlan(vsi, vid);
3146 	if (ret)
3147 		return ret;
3148 
3149 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3150 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3151 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3152 
3153 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3154 	return ret;
3155 }
3156 
3157 /**
3158  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3159  * @pf: board private structure
3160  *
3161  * Returns 0 on success, negative value on failure
3162  */
3163 static int ice_setup_pf_sw(struct ice_pf *pf)
3164 {
3165 	struct ice_vsi *vsi;
3166 	int status = 0;
3167 
3168 	if (ice_is_reset_in_progress(pf->state))
3169 		return -EBUSY;
3170 
3171 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3172 	if (!vsi)
3173 		return -ENOMEM;
3174 
3175 	status = ice_cfg_netdev(vsi);
3176 	if (status) {
3177 		status = -ENODEV;
3178 		goto unroll_vsi_setup;
3179 	}
3180 	/* netdev has to be configured before setting frame size */
3181 	ice_vsi_cfg_frame_size(vsi);
3182 
3183 	/* Setup DCB netlink interface */
3184 	ice_dcbnl_setup(vsi);
3185 
3186 	/* registering the NAPI handler requires both the queues and
3187 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3188 	 * and ice_cfg_netdev() respectively
3189 	 */
3190 	ice_napi_add(vsi);
3191 
3192 	status = ice_set_cpu_rx_rmap(vsi);
3193 	if (status) {
3194 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3195 			vsi->vsi_num, status);
3196 		status = -EINVAL;
3197 		goto unroll_napi_add;
3198 	}
3199 	status = ice_init_mac_fltr(pf);
3200 	if (status)
3201 		goto free_cpu_rx_map;
3202 
3203 	return status;
3204 
3205 free_cpu_rx_map:
3206 	ice_free_cpu_rx_rmap(vsi);
3207 
3208 unroll_napi_add:
3209 	if (vsi) {
3210 		ice_napi_del(vsi);
3211 		if (vsi->netdev) {
3212 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3213 			free_netdev(vsi->netdev);
3214 			vsi->netdev = NULL;
3215 		}
3216 	}
3217 
3218 unroll_vsi_setup:
3219 	ice_vsi_release(vsi);
3220 	return status;
3221 }
3222 
3223 /**
3224  * ice_get_avail_q_count - Get count of queues in use
3225  * @pf_qmap: bitmap to get queue use count from
3226  * @lock: pointer to a mutex that protects access to pf_qmap
3227  * @size: size of the bitmap
3228  */
3229 static u16
3230 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3231 {
3232 	unsigned long bit;
3233 	u16 count = 0;
3234 
3235 	mutex_lock(lock);
3236 	for_each_clear_bit(bit, pf_qmap, size)
3237 		count++;
3238 	mutex_unlock(lock);
3239 
3240 	return count;
3241 }
3242 
3243 /**
3244  * ice_get_avail_txq_count - Get count of Tx queues in use
3245  * @pf: pointer to an ice_pf instance
3246  */
3247 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3248 {
3249 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3250 				     pf->max_pf_txqs);
3251 }
3252 
3253 /**
3254  * ice_get_avail_rxq_count - Get count of Rx queues in use
3255  * @pf: pointer to an ice_pf instance
3256  */
3257 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3258 {
3259 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3260 				     pf->max_pf_rxqs);
3261 }
3262 
3263 /**
3264  * ice_deinit_pf - Unrolls initialziations done by ice_init_pf
3265  * @pf: board private structure to initialize
3266  */
3267 static void ice_deinit_pf(struct ice_pf *pf)
3268 {
3269 	ice_service_task_stop(pf);
3270 	mutex_destroy(&pf->sw_mutex);
3271 	mutex_destroy(&pf->tc_mutex);
3272 	mutex_destroy(&pf->avail_q_mutex);
3273 
3274 	if (pf->avail_txqs) {
3275 		bitmap_free(pf->avail_txqs);
3276 		pf->avail_txqs = NULL;
3277 	}
3278 
3279 	if (pf->avail_rxqs) {
3280 		bitmap_free(pf->avail_rxqs);
3281 		pf->avail_rxqs = NULL;
3282 	}
3283 }
3284 
3285 /**
3286  * ice_set_pf_caps - set PFs capability flags
3287  * @pf: pointer to the PF instance
3288  */
3289 static void ice_set_pf_caps(struct ice_pf *pf)
3290 {
3291 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3292 
3293 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3294 	if (func_caps->common_cap.dcb)
3295 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3296 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3297 	if (func_caps->common_cap.sr_iov_1_1) {
3298 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3299 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3300 					      ICE_MAX_VF_COUNT);
3301 	}
3302 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3303 	if (func_caps->common_cap.rss_table_size)
3304 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3305 
3306 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3307 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3308 		u16 unused;
3309 
3310 		/* ctrl_vsi_idx will be set to a valid value when flow director
3311 		 * is setup by ice_init_fdir
3312 		 */
3313 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3314 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3315 		/* force guaranteed filter pool for PF */
3316 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3317 				       func_caps->fd_fltr_guar);
3318 		/* force shared filter pool for PF */
3319 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3320 				       func_caps->fd_fltr_best_effort);
3321 	}
3322 
3323 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3324 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3325 }
3326 
3327 /**
3328  * ice_init_pf - Initialize general software structures (struct ice_pf)
3329  * @pf: board private structure to initialize
3330  */
3331 static int ice_init_pf(struct ice_pf *pf)
3332 {
3333 	ice_set_pf_caps(pf);
3334 
3335 	mutex_init(&pf->sw_mutex);
3336 	mutex_init(&pf->tc_mutex);
3337 
3338 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3339 	spin_lock_init(&pf->aq_wait_lock);
3340 	init_waitqueue_head(&pf->aq_wait_queue);
3341 
3342 	/* setup service timer and periodic service task */
3343 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3344 	pf->serv_tmr_period = HZ;
3345 	INIT_WORK(&pf->serv_task, ice_service_task);
3346 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3347 
3348 	mutex_init(&pf->avail_q_mutex);
3349 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3350 	if (!pf->avail_txqs)
3351 		return -ENOMEM;
3352 
3353 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3354 	if (!pf->avail_rxqs) {
3355 		devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
3356 		pf->avail_txqs = NULL;
3357 		return -ENOMEM;
3358 	}
3359 
3360 	return 0;
3361 }
3362 
3363 /**
3364  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3365  * @pf: board private structure
3366  *
3367  * compute the number of MSIX vectors required (v_budget) and request from
3368  * the OS. Return the number of vectors reserved or negative on failure
3369  */
3370 static int ice_ena_msix_range(struct ice_pf *pf)
3371 {
3372 	int v_left, v_actual, v_other, v_budget = 0;
3373 	struct device *dev = ice_pf_to_dev(pf);
3374 	int needed, err, i;
3375 
3376 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3377 
3378 	/* reserve for LAN miscellaneous handler */
3379 	needed = ICE_MIN_LAN_OICR_MSIX;
3380 	if (v_left < needed)
3381 		goto no_hw_vecs_left_err;
3382 	v_budget += needed;
3383 	v_left -= needed;
3384 
3385 	/* reserve for flow director */
3386 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3387 		needed = ICE_FDIR_MSIX;
3388 		if (v_left < needed)
3389 			goto no_hw_vecs_left_err;
3390 		v_budget += needed;
3391 		v_left -= needed;
3392 	}
3393 
3394 	/* total used for non-traffic vectors */
3395 	v_other = v_budget;
3396 
3397 	/* reserve vectors for LAN traffic */
3398 	needed = min_t(int, num_online_cpus(), v_left);
3399 	if (v_left < needed)
3400 		goto no_hw_vecs_left_err;
3401 	pf->num_lan_msix = needed;
3402 	v_budget += needed;
3403 	v_left -= needed;
3404 
3405 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3406 					sizeof(*pf->msix_entries), GFP_KERNEL);
3407 	if (!pf->msix_entries) {
3408 		err = -ENOMEM;
3409 		goto exit_err;
3410 	}
3411 
3412 	for (i = 0; i < v_budget; i++)
3413 		pf->msix_entries[i].entry = i;
3414 
3415 	/* actually reserve the vectors */
3416 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3417 					 ICE_MIN_MSIX, v_budget);
3418 	if (v_actual < 0) {
3419 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3420 		err = v_actual;
3421 		goto msix_err;
3422 	}
3423 
3424 	if (v_actual < v_budget) {
3425 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3426 			 v_budget, v_actual);
3427 
3428 		if (v_actual < ICE_MIN_MSIX) {
3429 			/* error if we can't get minimum vectors */
3430 			pci_disable_msix(pf->pdev);
3431 			err = -ERANGE;
3432 			goto msix_err;
3433 		} else {
3434 			int v_traffic = v_actual - v_other;
3435 
3436 			if (v_actual == ICE_MIN_MSIX ||
3437 			    v_traffic < ICE_MIN_LAN_TXRX_MSIX)
3438 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3439 			else
3440 				pf->num_lan_msix = v_traffic;
3441 
3442 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3443 				   pf->num_lan_msix);
3444 		}
3445 	}
3446 
3447 	return v_actual;
3448 
3449 msix_err:
3450 	devm_kfree(dev, pf->msix_entries);
3451 	goto exit_err;
3452 
3453 no_hw_vecs_left_err:
3454 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3455 		needed, v_left);
3456 	err = -ERANGE;
3457 exit_err:
3458 	pf->num_lan_msix = 0;
3459 	return err;
3460 }
3461 
3462 /**
3463  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3464  * @pf: board private structure
3465  */
3466 static void ice_dis_msix(struct ice_pf *pf)
3467 {
3468 	pci_disable_msix(pf->pdev);
3469 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3470 	pf->msix_entries = NULL;
3471 }
3472 
3473 /**
3474  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3475  * @pf: board private structure
3476  */
3477 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3478 {
3479 	ice_dis_msix(pf);
3480 
3481 	if (pf->irq_tracker) {
3482 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3483 		pf->irq_tracker = NULL;
3484 	}
3485 }
3486 
3487 /**
3488  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3489  * @pf: board private structure to initialize
3490  */
3491 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3492 {
3493 	int vectors;
3494 
3495 	vectors = ice_ena_msix_range(pf);
3496 
3497 	if (vectors < 0)
3498 		return vectors;
3499 
3500 	/* set up vector assignment tracking */
3501 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3502 				       struct_size(pf->irq_tracker, list, vectors),
3503 				       GFP_KERNEL);
3504 	if (!pf->irq_tracker) {
3505 		ice_dis_msix(pf);
3506 		return -ENOMEM;
3507 	}
3508 
3509 	/* populate SW interrupts pool with number of OS granted IRQs. */
3510 	pf->num_avail_sw_msix = (u16)vectors;
3511 	pf->irq_tracker->num_entries = (u16)vectors;
3512 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3513 
3514 	return 0;
3515 }
3516 
3517 /**
3518  * ice_is_wol_supported - check if WoL is supported
3519  * @hw: pointer to hardware info
3520  *
3521  * Check if WoL is supported based on the HW configuration.
3522  * Returns true if NVM supports and enables WoL for this port, false otherwise
3523  */
3524 bool ice_is_wol_supported(struct ice_hw *hw)
3525 {
3526 	u16 wol_ctrl;
3527 
3528 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3529 	 * word) indicates WoL is not supported on the corresponding PF ID.
3530 	 */
3531 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3532 		return false;
3533 
3534 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3535 }
3536 
3537 /**
3538  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3539  * @vsi: VSI being changed
3540  * @new_rx: new number of Rx queues
3541  * @new_tx: new number of Tx queues
3542  *
3543  * Only change the number of queues if new_tx, or new_rx is non-0.
3544  *
3545  * Returns 0 on success.
3546  */
3547 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3548 {
3549 	struct ice_pf *pf = vsi->back;
3550 	int err = 0, timeout = 50;
3551 
3552 	if (!new_rx && !new_tx)
3553 		return -EINVAL;
3554 
3555 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3556 		timeout--;
3557 		if (!timeout)
3558 			return -EBUSY;
3559 		usleep_range(1000, 2000);
3560 	}
3561 
3562 	if (new_tx)
3563 		vsi->req_txq = (u16)new_tx;
3564 	if (new_rx)
3565 		vsi->req_rxq = (u16)new_rx;
3566 
3567 	/* set for the next time the netdev is started */
3568 	if (!netif_running(vsi->netdev)) {
3569 		ice_vsi_rebuild(vsi, false);
3570 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3571 		goto done;
3572 	}
3573 
3574 	ice_vsi_close(vsi);
3575 	ice_vsi_rebuild(vsi, false);
3576 	ice_pf_dcb_recfg(pf);
3577 	ice_vsi_open(vsi);
3578 done:
3579 	clear_bit(ICE_CFG_BUSY, pf->state);
3580 	return err;
3581 }
3582 
3583 /**
3584  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3585  * @pf: PF to configure
3586  *
3587  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3588  * VSI can still Tx/Rx VLAN tagged packets.
3589  */
3590 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3591 {
3592 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3593 	struct ice_vsi_ctx *ctxt;
3594 	enum ice_status status;
3595 	struct ice_hw *hw;
3596 
3597 	if (!vsi)
3598 		return;
3599 
3600 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3601 	if (!ctxt)
3602 		return;
3603 
3604 	hw = &pf->hw;
3605 	ctxt->info = vsi->info;
3606 
3607 	ctxt->info.valid_sections =
3608 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3609 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3610 			    ICE_AQ_VSI_PROP_SW_VALID);
3611 
3612 	/* disable VLAN anti-spoof */
3613 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3614 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3615 
3616 	/* disable VLAN pruning and keep all other settings */
3617 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3618 
3619 	/* allow all VLANs on Tx and don't strip on Rx */
3620 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3621 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3622 
3623 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3624 	if (status) {
3625 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3626 			ice_stat_str(status),
3627 			ice_aq_str(hw->adminq.sq_last_status));
3628 	} else {
3629 		vsi->info.sec_flags = ctxt->info.sec_flags;
3630 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3631 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3632 	}
3633 
3634 	kfree(ctxt);
3635 }
3636 
3637 /**
3638  * ice_log_pkg_init - log result of DDP package load
3639  * @hw: pointer to hardware info
3640  * @status: status of package load
3641  */
3642 static void
3643 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3644 {
3645 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3646 	struct device *dev = ice_pf_to_dev(pf);
3647 
3648 	switch (*status) {
3649 	case ICE_SUCCESS:
3650 		/* The package download AdminQ command returned success because
3651 		 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
3652 		 * already a package loaded on the device.
3653 		 */
3654 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3655 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3656 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3657 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3658 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3659 			    sizeof(hw->pkg_name))) {
3660 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3661 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3662 					 hw->active_pkg_name,
3663 					 hw->active_pkg_ver.major,
3664 					 hw->active_pkg_ver.minor,
3665 					 hw->active_pkg_ver.update,
3666 					 hw->active_pkg_ver.draft);
3667 			else
3668 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3669 					 hw->active_pkg_name,
3670 					 hw->active_pkg_ver.major,
3671 					 hw->active_pkg_ver.minor,
3672 					 hw->active_pkg_ver.update,
3673 					 hw->active_pkg_ver.draft);
3674 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3675 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3676 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3677 				hw->active_pkg_name,
3678 				hw->active_pkg_ver.major,
3679 				hw->active_pkg_ver.minor,
3680 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3681 			*status = ICE_ERR_NOT_SUPPORTED;
3682 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3683 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3684 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3685 				 hw->active_pkg_name,
3686 				 hw->active_pkg_ver.major,
3687 				 hw->active_pkg_ver.minor,
3688 				 hw->active_pkg_ver.update,
3689 				 hw->active_pkg_ver.draft,
3690 				 hw->pkg_name,
3691 				 hw->pkg_ver.major,
3692 				 hw->pkg_ver.minor,
3693 				 hw->pkg_ver.update,
3694 				 hw->pkg_ver.draft);
3695 		} else {
3696 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3697 			*status = ICE_ERR_NOT_SUPPORTED;
3698 		}
3699 		break;
3700 	case ICE_ERR_FW_DDP_MISMATCH:
3701 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3702 		break;
3703 	case ICE_ERR_BUF_TOO_SHORT:
3704 	case ICE_ERR_CFG:
3705 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3706 		break;
3707 	case ICE_ERR_NOT_SUPPORTED:
3708 		/* Package File version not supported */
3709 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3710 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3711 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3712 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3713 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3714 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3715 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3716 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3717 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3718 		break;
3719 	case ICE_ERR_AQ_ERROR:
3720 		switch (hw->pkg_dwnld_status) {
3721 		case ICE_AQ_RC_ENOSEC:
3722 		case ICE_AQ_RC_EBADSIG:
3723 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3724 			return;
3725 		case ICE_AQ_RC_ESVN:
3726 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3727 			return;
3728 		case ICE_AQ_RC_EBADMAN:
3729 		case ICE_AQ_RC_EBADBUF:
3730 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3731 			/* poll for reset to complete */
3732 			if (ice_check_reset(hw))
3733 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3734 			return;
3735 		default:
3736 			break;
3737 		}
3738 		fallthrough;
3739 	default:
3740 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3741 			*status);
3742 		break;
3743 	}
3744 }
3745 
3746 /**
3747  * ice_load_pkg - load/reload the DDP Package file
3748  * @firmware: firmware structure when firmware requested or NULL for reload
3749  * @pf: pointer to the PF instance
3750  *
3751  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3752  * initialize HW tables.
3753  */
3754 static void
3755 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3756 {
3757 	enum ice_status status = ICE_ERR_PARAM;
3758 	struct device *dev = ice_pf_to_dev(pf);
3759 	struct ice_hw *hw = &pf->hw;
3760 
3761 	/* Load DDP Package */
3762 	if (firmware && !hw->pkg_copy) {
3763 		status = ice_copy_and_init_pkg(hw, firmware->data,
3764 					       firmware->size);
3765 		ice_log_pkg_init(hw, &status);
3766 	} else if (!firmware && hw->pkg_copy) {
3767 		/* Reload package during rebuild after CORER/GLOBR reset */
3768 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3769 		ice_log_pkg_init(hw, &status);
3770 	} else {
3771 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3772 	}
3773 
3774 	if (status) {
3775 		/* Safe Mode */
3776 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3777 		return;
3778 	}
3779 
3780 	/* Successful download package is the precondition for advanced
3781 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
3782 	 */
3783 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3784 }
3785 
3786 /**
3787  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3788  * @pf: pointer to the PF structure
3789  *
3790  * There is no error returned here because the driver should be able to handle
3791  * 128 Byte cache lines, so we only print a warning in case issues are seen,
3792  * specifically with Tx.
3793  */
3794 static void ice_verify_cacheline_size(struct ice_pf *pf)
3795 {
3796 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3797 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3798 			 ICE_CACHE_LINE_BYTES);
3799 }
3800 
3801 /**
3802  * ice_send_version - update firmware with driver version
3803  * @pf: PF struct
3804  *
3805  * Returns ICE_SUCCESS on success, else error code
3806  */
3807 static enum ice_status ice_send_version(struct ice_pf *pf)
3808 {
3809 	struct ice_driver_ver dv;
3810 
3811 	dv.major_ver = 0xff;
3812 	dv.minor_ver = 0xff;
3813 	dv.build_ver = 0xff;
3814 	dv.subbuild_ver = 0;
3815 	strscpy((char *)dv.driver_string, UTS_RELEASE,
3816 		sizeof(dv.driver_string));
3817 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3818 }
3819 
3820 /**
3821  * ice_init_fdir - Initialize flow director VSI and configuration
3822  * @pf: pointer to the PF instance
3823  *
3824  * returns 0 on success, negative on error
3825  */
3826 static int ice_init_fdir(struct ice_pf *pf)
3827 {
3828 	struct device *dev = ice_pf_to_dev(pf);
3829 	struct ice_vsi *ctrl_vsi;
3830 	int err;
3831 
3832 	/* Side Band Flow Director needs to have a control VSI.
3833 	 * Allocate it and store it in the PF.
3834 	 */
3835 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
3836 	if (!ctrl_vsi) {
3837 		dev_dbg(dev, "could not create control VSI\n");
3838 		return -ENOMEM;
3839 	}
3840 
3841 	err = ice_vsi_open_ctrl(ctrl_vsi);
3842 	if (err) {
3843 		dev_dbg(dev, "could not open control VSI\n");
3844 		goto err_vsi_open;
3845 	}
3846 
3847 	mutex_init(&pf->hw.fdir_fltr_lock);
3848 
3849 	err = ice_fdir_create_dflt_rules(pf);
3850 	if (err)
3851 		goto err_fdir_rule;
3852 
3853 	return 0;
3854 
3855 err_fdir_rule:
3856 	ice_fdir_release_flows(&pf->hw);
3857 	ice_vsi_close(ctrl_vsi);
3858 err_vsi_open:
3859 	ice_vsi_release(ctrl_vsi);
3860 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
3861 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
3862 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3863 	}
3864 	return err;
3865 }
3866 
3867 /**
3868  * ice_get_opt_fw_name - return optional firmware file name or NULL
3869  * @pf: pointer to the PF instance
3870  */
3871 static char *ice_get_opt_fw_name(struct ice_pf *pf)
3872 {
3873 	/* Optional firmware name same as default with additional dash
3874 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
3875 	 */
3876 	struct pci_dev *pdev = pf->pdev;
3877 	char *opt_fw_filename;
3878 	u64 dsn;
3879 
3880 	/* Determine the name of the optional file using the DSN (two
3881 	 * dwords following the start of the DSN Capability).
3882 	 */
3883 	dsn = pci_get_dsn(pdev);
3884 	if (!dsn)
3885 		return NULL;
3886 
3887 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
3888 	if (!opt_fw_filename)
3889 		return NULL;
3890 
3891 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
3892 		 ICE_DDP_PKG_PATH, dsn);
3893 
3894 	return opt_fw_filename;
3895 }
3896 
3897 /**
3898  * ice_request_fw - Device initialization routine
3899  * @pf: pointer to the PF instance
3900  */
3901 static void ice_request_fw(struct ice_pf *pf)
3902 {
3903 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
3904 	const struct firmware *firmware = NULL;
3905 	struct device *dev = ice_pf_to_dev(pf);
3906 	int err = 0;
3907 
3908 	/* optional device-specific DDP (if present) overrides the default DDP
3909 	 * package file. kernel logs a debug message if the file doesn't exist,
3910 	 * and warning messages for other errors.
3911 	 */
3912 	if (opt_fw_filename) {
3913 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
3914 		if (err) {
3915 			kfree(opt_fw_filename);
3916 			goto dflt_pkg_load;
3917 		}
3918 
3919 		/* request for firmware was successful. Download to device */
3920 		ice_load_pkg(firmware, pf);
3921 		kfree(opt_fw_filename);
3922 		release_firmware(firmware);
3923 		return;
3924 	}
3925 
3926 dflt_pkg_load:
3927 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
3928 	if (err) {
3929 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
3930 		return;
3931 	}
3932 
3933 	/* request for firmware was successful. Download to device */
3934 	ice_load_pkg(firmware, pf);
3935 	release_firmware(firmware);
3936 }
3937 
3938 /**
3939  * ice_print_wake_reason - show the wake up cause in the log
3940  * @pf: pointer to the PF struct
3941  */
3942 static void ice_print_wake_reason(struct ice_pf *pf)
3943 {
3944 	u32 wus = pf->wakeup_reason;
3945 	const char *wake_str;
3946 
3947 	/* if no wake event, nothing to print */
3948 	if (!wus)
3949 		return;
3950 
3951 	if (wus & PFPM_WUS_LNKC_M)
3952 		wake_str = "Link\n";
3953 	else if (wus & PFPM_WUS_MAG_M)
3954 		wake_str = "Magic Packet\n";
3955 	else if (wus & PFPM_WUS_MNG_M)
3956 		wake_str = "Management\n";
3957 	else if (wus & PFPM_WUS_FW_RST_WK_M)
3958 		wake_str = "Firmware Reset\n";
3959 	else
3960 		wake_str = "Unknown\n";
3961 
3962 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
3963 }
3964 
3965 /**
3966  * ice_register_netdev - register netdev and devlink port
3967  * @pf: pointer to the PF struct
3968  */
3969 static int ice_register_netdev(struct ice_pf *pf)
3970 {
3971 	struct ice_vsi *vsi;
3972 	int err = 0;
3973 
3974 	vsi = ice_get_main_vsi(pf);
3975 	if (!vsi || !vsi->netdev)
3976 		return -EIO;
3977 
3978 	err = register_netdev(vsi->netdev);
3979 	if (err)
3980 		goto err_register_netdev;
3981 
3982 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
3983 	netif_carrier_off(vsi->netdev);
3984 	netif_tx_stop_all_queues(vsi->netdev);
3985 	err = ice_devlink_create_port(vsi);
3986 	if (err)
3987 		goto err_devlink_create;
3988 
3989 	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
3990 
3991 	return 0;
3992 err_devlink_create:
3993 	unregister_netdev(vsi->netdev);
3994 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
3995 err_register_netdev:
3996 	free_netdev(vsi->netdev);
3997 	vsi->netdev = NULL;
3998 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3999 	return err;
4000 }
4001 
4002 /**
4003  * ice_probe - Device initialization routine
4004  * @pdev: PCI device information struct
4005  * @ent: entry in ice_pci_tbl
4006  *
4007  * Returns 0 on success, negative on failure
4008  */
4009 static int
4010 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4011 {
4012 	struct device *dev = &pdev->dev;
4013 	struct ice_pf *pf;
4014 	struct ice_hw *hw;
4015 	int i, err;
4016 
4017 	/* this driver uses devres, see
4018 	 * Documentation/driver-api/driver-model/devres.rst
4019 	 */
4020 	err = pcim_enable_device(pdev);
4021 	if (err)
4022 		return err;
4023 
4024 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4025 	if (err) {
4026 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4027 		return err;
4028 	}
4029 
4030 	pf = ice_allocate_pf(dev);
4031 	if (!pf)
4032 		return -ENOMEM;
4033 
4034 	/* set up for high or low DMA */
4035 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4036 	if (err)
4037 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4038 	if (err) {
4039 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4040 		return err;
4041 	}
4042 
4043 	pci_enable_pcie_error_reporting(pdev);
4044 	pci_set_master(pdev);
4045 
4046 	pf->pdev = pdev;
4047 	pci_set_drvdata(pdev, pf);
4048 	set_bit(ICE_DOWN, pf->state);
4049 	/* Disable service task until DOWN bit is cleared */
4050 	set_bit(ICE_SERVICE_DIS, pf->state);
4051 
4052 	hw = &pf->hw;
4053 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4054 	pci_save_state(pdev);
4055 
4056 	hw->back = pf;
4057 	hw->vendor_id = pdev->vendor;
4058 	hw->device_id = pdev->device;
4059 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4060 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4061 	hw->subsystem_device_id = pdev->subsystem_device;
4062 	hw->bus.device = PCI_SLOT(pdev->devfn);
4063 	hw->bus.func = PCI_FUNC(pdev->devfn);
4064 	ice_set_ctrlq_len(hw);
4065 
4066 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4067 
4068 	err = ice_devlink_register(pf);
4069 	if (err) {
4070 		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4071 		goto err_exit_unroll;
4072 	}
4073 
4074 #ifndef CONFIG_DYNAMIC_DEBUG
4075 	if (debug < -1)
4076 		hw->debug_mask = debug;
4077 #endif
4078 
4079 	err = ice_init_hw(hw);
4080 	if (err) {
4081 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4082 		err = -EIO;
4083 		goto err_exit_unroll;
4084 	}
4085 
4086 	ice_request_fw(pf);
4087 
4088 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4089 	 * set in pf->state, which will cause ice_is_safe_mode to return
4090 	 * true
4091 	 */
4092 	if (ice_is_safe_mode(pf)) {
4093 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4094 		/* we already got function/device capabilities but these don't
4095 		 * reflect what the driver needs to do in safe mode. Instead of
4096 		 * adding conditional logic everywhere to ignore these
4097 		 * device/function capabilities, override them.
4098 		 */
4099 		ice_set_safe_mode_caps(hw);
4100 	}
4101 
4102 	err = ice_init_pf(pf);
4103 	if (err) {
4104 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4105 		goto err_init_pf_unroll;
4106 	}
4107 
4108 	ice_devlink_init_regions(pf);
4109 
4110 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4111 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4112 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4113 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4114 	i = 0;
4115 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4116 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4117 			pf->hw.tnl.valid_count[TNL_VXLAN];
4118 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4119 			UDP_TUNNEL_TYPE_VXLAN;
4120 		i++;
4121 	}
4122 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4123 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4124 			pf->hw.tnl.valid_count[TNL_GENEVE];
4125 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4126 			UDP_TUNNEL_TYPE_GENEVE;
4127 		i++;
4128 	}
4129 
4130 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4131 	if (!pf->num_alloc_vsi) {
4132 		err = -EIO;
4133 		goto err_init_pf_unroll;
4134 	}
4135 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4136 		dev_warn(&pf->pdev->dev,
4137 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4138 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4139 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4140 	}
4141 
4142 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4143 			       GFP_KERNEL);
4144 	if (!pf->vsi) {
4145 		err = -ENOMEM;
4146 		goto err_init_pf_unroll;
4147 	}
4148 
4149 	err = ice_init_interrupt_scheme(pf);
4150 	if (err) {
4151 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4152 		err = -EIO;
4153 		goto err_init_vsi_unroll;
4154 	}
4155 
4156 	/* In case of MSIX we are going to setup the misc vector right here
4157 	 * to handle admin queue events etc. In case of legacy and MSI
4158 	 * the misc functionality and queue processing is combined in
4159 	 * the same vector and that gets setup at open.
4160 	 */
4161 	err = ice_req_irq_msix_misc(pf);
4162 	if (err) {
4163 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4164 		goto err_init_interrupt_unroll;
4165 	}
4166 
4167 	/* create switch struct for the switch element created by FW on boot */
4168 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4169 	if (!pf->first_sw) {
4170 		err = -ENOMEM;
4171 		goto err_msix_misc_unroll;
4172 	}
4173 
4174 	if (hw->evb_veb)
4175 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4176 	else
4177 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4178 
4179 	pf->first_sw->pf = pf;
4180 
4181 	/* record the sw_id available for later use */
4182 	pf->first_sw->sw_id = hw->port_info->sw_id;
4183 
4184 	err = ice_setup_pf_sw(pf);
4185 	if (err) {
4186 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4187 		goto err_alloc_sw_unroll;
4188 	}
4189 
4190 	clear_bit(ICE_SERVICE_DIS, pf->state);
4191 
4192 	/* tell the firmware we are up */
4193 	err = ice_send_version(pf);
4194 	if (err) {
4195 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4196 			UTS_RELEASE, err);
4197 		goto err_send_version_unroll;
4198 	}
4199 
4200 	/* since everything is good, start the service timer */
4201 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4202 
4203 	err = ice_init_link_events(pf->hw.port_info);
4204 	if (err) {
4205 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4206 		goto err_send_version_unroll;
4207 	}
4208 
4209 	/* not a fatal error if this fails */
4210 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4211 	if (err)
4212 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4213 
4214 	/* not a fatal error if this fails */
4215 	err = ice_update_link_info(pf->hw.port_info);
4216 	if (err)
4217 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4218 
4219 	ice_init_link_dflt_override(pf->hw.port_info);
4220 
4221 	/* if media available, initialize PHY settings */
4222 	if (pf->hw.port_info->phy.link_info.link_info &
4223 	    ICE_AQ_MEDIA_AVAILABLE) {
4224 		/* not a fatal error if this fails */
4225 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4226 		if (err)
4227 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4228 
4229 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4230 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4231 
4232 			if (vsi)
4233 				ice_configure_phy(vsi);
4234 		}
4235 	} else {
4236 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4237 	}
4238 
4239 	ice_verify_cacheline_size(pf);
4240 
4241 	/* Save wakeup reason register for later use */
4242 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4243 
4244 	/* check for a power management event */
4245 	ice_print_wake_reason(pf);
4246 
4247 	/* clear wake status, all bits */
4248 	wr32(hw, PFPM_WUS, U32_MAX);
4249 
4250 	/* Disable WoL at init, wait for user to enable */
4251 	device_set_wakeup_enable(dev, false);
4252 
4253 	if (ice_is_safe_mode(pf)) {
4254 		ice_set_safe_mode_vlan_cfg(pf);
4255 		goto probe_done;
4256 	}
4257 
4258 	/* initialize DDP driven features */
4259 
4260 	/* Note: Flow director init failure is non-fatal to load */
4261 	if (ice_init_fdir(pf))
4262 		dev_err(dev, "could not initialize flow director\n");
4263 
4264 	/* Note: DCB init failure is non-fatal to load */
4265 	if (ice_init_pf_dcb(pf, false)) {
4266 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4267 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4268 	} else {
4269 		ice_cfg_lldp_mib_change(&pf->hw, true);
4270 	}
4271 
4272 	if (ice_init_lag(pf))
4273 		dev_warn(dev, "Failed to init link aggregation support\n");
4274 
4275 	/* print PCI link speed and width */
4276 	pcie_print_link_status(pf->pdev);
4277 
4278 probe_done:
4279 	err = ice_register_netdev(pf);
4280 	if (err)
4281 		goto err_netdev_reg;
4282 
4283 	/* ready to go, so clear down state bit */
4284 	clear_bit(ICE_DOWN, pf->state);
4285 	return 0;
4286 
4287 err_netdev_reg:
4288 err_send_version_unroll:
4289 	ice_vsi_release_all(pf);
4290 err_alloc_sw_unroll:
4291 	set_bit(ICE_SERVICE_DIS, pf->state);
4292 	set_bit(ICE_DOWN, pf->state);
4293 	devm_kfree(dev, pf->first_sw);
4294 err_msix_misc_unroll:
4295 	ice_free_irq_msix_misc(pf);
4296 err_init_interrupt_unroll:
4297 	ice_clear_interrupt_scheme(pf);
4298 err_init_vsi_unroll:
4299 	devm_kfree(dev, pf->vsi);
4300 err_init_pf_unroll:
4301 	ice_deinit_pf(pf);
4302 	ice_devlink_destroy_regions(pf);
4303 	ice_deinit_hw(hw);
4304 err_exit_unroll:
4305 	ice_devlink_unregister(pf);
4306 	pci_disable_pcie_error_reporting(pdev);
4307 	pci_disable_device(pdev);
4308 	return err;
4309 }
4310 
4311 /**
4312  * ice_set_wake - enable or disable Wake on LAN
4313  * @pf: pointer to the PF struct
4314  *
4315  * Simple helper for WoL control
4316  */
4317 static void ice_set_wake(struct ice_pf *pf)
4318 {
4319 	struct ice_hw *hw = &pf->hw;
4320 	bool wol = pf->wol_ena;
4321 
4322 	/* clear wake state, otherwise new wake events won't fire */
4323 	wr32(hw, PFPM_WUS, U32_MAX);
4324 
4325 	/* enable / disable APM wake up, no RMW needed */
4326 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4327 
4328 	/* set magic packet filter enabled */
4329 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4330 }
4331 
4332 /**
4333  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4334  * @pf: pointer to the PF struct
4335  *
4336  * Issue firmware command to enable multicast magic wake, making
4337  * sure that any locally administered address (LAA) is used for
4338  * wake, and that PF reset doesn't undo the LAA.
4339  */
4340 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4341 {
4342 	struct device *dev = ice_pf_to_dev(pf);
4343 	struct ice_hw *hw = &pf->hw;
4344 	enum ice_status status;
4345 	u8 mac_addr[ETH_ALEN];
4346 	struct ice_vsi *vsi;
4347 	u8 flags;
4348 
4349 	if (!pf->wol_ena)
4350 		return;
4351 
4352 	vsi = ice_get_main_vsi(pf);
4353 	if (!vsi)
4354 		return;
4355 
4356 	/* Get current MAC address in case it's an LAA */
4357 	if (vsi->netdev)
4358 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4359 	else
4360 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4361 
4362 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4363 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4364 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4365 
4366 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4367 	if (status)
4368 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4369 			ice_stat_str(status),
4370 			ice_aq_str(hw->adminq.sq_last_status));
4371 }
4372 
4373 /**
4374  * ice_remove - Device removal routine
4375  * @pdev: PCI device information struct
4376  */
4377 static void ice_remove(struct pci_dev *pdev)
4378 {
4379 	struct ice_pf *pf = pci_get_drvdata(pdev);
4380 	int i;
4381 
4382 	if (!pf)
4383 		return;
4384 
4385 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4386 		if (!ice_is_reset_in_progress(pf->state))
4387 			break;
4388 		msleep(100);
4389 	}
4390 
4391 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4392 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4393 		ice_free_vfs(pf);
4394 	}
4395 
4396 	set_bit(ICE_DOWN, pf->state);
4397 	ice_service_task_stop(pf);
4398 
4399 	ice_aq_cancel_waiting_tasks(pf);
4400 
4401 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
4402 	ice_deinit_lag(pf);
4403 	if (!ice_is_safe_mode(pf))
4404 		ice_remove_arfs(pf);
4405 	ice_setup_mc_magic_wake(pf);
4406 	ice_vsi_release_all(pf);
4407 	ice_set_wake(pf);
4408 	ice_free_irq_msix_misc(pf);
4409 	ice_for_each_vsi(pf, i) {
4410 		if (!pf->vsi[i])
4411 			continue;
4412 		ice_vsi_free_q_vectors(pf->vsi[i]);
4413 	}
4414 	ice_deinit_pf(pf);
4415 	ice_devlink_destroy_regions(pf);
4416 	ice_deinit_hw(&pf->hw);
4417 	ice_devlink_unregister(pf);
4418 
4419 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4420 	 * do it via ice_schedule_reset() since there is no need to rebuild
4421 	 * and the service task is already stopped.
4422 	 */
4423 	ice_reset(&pf->hw, ICE_RESET_PFR);
4424 	pci_wait_for_pending_transaction(pdev);
4425 	ice_clear_interrupt_scheme(pf);
4426 	pci_disable_pcie_error_reporting(pdev);
4427 	pci_disable_device(pdev);
4428 }
4429 
4430 /**
4431  * ice_shutdown - PCI callback for shutting down device
4432  * @pdev: PCI device information struct
4433  */
4434 static void ice_shutdown(struct pci_dev *pdev)
4435 {
4436 	struct ice_pf *pf = pci_get_drvdata(pdev);
4437 
4438 	ice_remove(pdev);
4439 
4440 	if (system_state == SYSTEM_POWER_OFF) {
4441 		pci_wake_from_d3(pdev, pf->wol_ena);
4442 		pci_set_power_state(pdev, PCI_D3hot);
4443 	}
4444 }
4445 
4446 #ifdef CONFIG_PM
4447 /**
4448  * ice_prepare_for_shutdown - prep for PCI shutdown
4449  * @pf: board private structure
4450  *
4451  * Inform or close all dependent features in prep for PCI device shutdown
4452  */
4453 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4454 {
4455 	struct ice_hw *hw = &pf->hw;
4456 	u32 v;
4457 
4458 	/* Notify VFs of impending reset */
4459 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4460 		ice_vc_notify_reset(pf);
4461 
4462 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4463 
4464 	/* disable the VSIs and their queues that are not already DOWN */
4465 	ice_pf_dis_all_vsi(pf, false);
4466 
4467 	ice_for_each_vsi(pf, v)
4468 		if (pf->vsi[v])
4469 			pf->vsi[v]->vsi_num = 0;
4470 
4471 	ice_shutdown_all_ctrlq(hw);
4472 }
4473 
4474 /**
4475  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4476  * @pf: board private structure to reinitialize
4477  *
4478  * This routine reinitialize interrupt scheme that was cleared during
4479  * power management suspend callback.
4480  *
4481  * This should be called during resume routine to re-allocate the q_vectors
4482  * and reacquire interrupts.
4483  */
4484 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4485 {
4486 	struct device *dev = ice_pf_to_dev(pf);
4487 	int ret, v;
4488 
4489 	/* Since we clear MSIX flag during suspend, we need to
4490 	 * set it back during resume...
4491 	 */
4492 
4493 	ret = ice_init_interrupt_scheme(pf);
4494 	if (ret) {
4495 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4496 		return ret;
4497 	}
4498 
4499 	/* Remap vectors and rings, after successful re-init interrupts */
4500 	ice_for_each_vsi(pf, v) {
4501 		if (!pf->vsi[v])
4502 			continue;
4503 
4504 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4505 		if (ret)
4506 			goto err_reinit;
4507 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4508 	}
4509 
4510 	ret = ice_req_irq_msix_misc(pf);
4511 	if (ret) {
4512 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4513 			ret);
4514 		goto err_reinit;
4515 	}
4516 
4517 	return 0;
4518 
4519 err_reinit:
4520 	while (v--)
4521 		if (pf->vsi[v])
4522 			ice_vsi_free_q_vectors(pf->vsi[v]);
4523 
4524 	return ret;
4525 }
4526 
4527 /**
4528  * ice_suspend
4529  * @dev: generic device information structure
4530  *
4531  * Power Management callback to quiesce the device and prepare
4532  * for D3 transition.
4533  */
4534 static int __maybe_unused ice_suspend(struct device *dev)
4535 {
4536 	struct pci_dev *pdev = to_pci_dev(dev);
4537 	struct ice_pf *pf;
4538 	int disabled, v;
4539 
4540 	pf = pci_get_drvdata(pdev);
4541 
4542 	if (!ice_pf_state_is_nominal(pf)) {
4543 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4544 		return -EBUSY;
4545 	}
4546 
4547 	/* Stop watchdog tasks until resume completion.
4548 	 * Even though it is most likely that the service task is
4549 	 * disabled if the device is suspended or down, the service task's
4550 	 * state is controlled by a different state bit, and we should
4551 	 * store and honor whatever state that bit is in at this point.
4552 	 */
4553 	disabled = ice_service_task_stop(pf);
4554 
4555 	/* Already suspended?, then there is nothing to do */
4556 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4557 		if (!disabled)
4558 			ice_service_task_restart(pf);
4559 		return 0;
4560 	}
4561 
4562 	if (test_bit(ICE_DOWN, pf->state) ||
4563 	    ice_is_reset_in_progress(pf->state)) {
4564 		dev_err(dev, "can't suspend device in reset or already down\n");
4565 		if (!disabled)
4566 			ice_service_task_restart(pf);
4567 		return 0;
4568 	}
4569 
4570 	ice_setup_mc_magic_wake(pf);
4571 
4572 	ice_prepare_for_shutdown(pf);
4573 
4574 	ice_set_wake(pf);
4575 
4576 	/* Free vectors, clear the interrupt scheme and release IRQs
4577 	 * for proper hibernation, especially with large number of CPUs.
4578 	 * Otherwise hibernation might fail when mapping all the vectors back
4579 	 * to CPU0.
4580 	 */
4581 	ice_free_irq_msix_misc(pf);
4582 	ice_for_each_vsi(pf, v) {
4583 		if (!pf->vsi[v])
4584 			continue;
4585 		ice_vsi_free_q_vectors(pf->vsi[v]);
4586 	}
4587 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4588 	ice_clear_interrupt_scheme(pf);
4589 
4590 	pci_save_state(pdev);
4591 	pci_wake_from_d3(pdev, pf->wol_ena);
4592 	pci_set_power_state(pdev, PCI_D3hot);
4593 	return 0;
4594 }
4595 
4596 /**
4597  * ice_resume - PM callback for waking up from D3
4598  * @dev: generic device information structure
4599  */
4600 static int __maybe_unused ice_resume(struct device *dev)
4601 {
4602 	struct pci_dev *pdev = to_pci_dev(dev);
4603 	enum ice_reset_req reset_type;
4604 	struct ice_pf *pf;
4605 	struct ice_hw *hw;
4606 	int ret;
4607 
4608 	pci_set_power_state(pdev, PCI_D0);
4609 	pci_restore_state(pdev);
4610 	pci_save_state(pdev);
4611 
4612 	if (!pci_device_is_present(pdev))
4613 		return -ENODEV;
4614 
4615 	ret = pci_enable_device_mem(pdev);
4616 	if (ret) {
4617 		dev_err(dev, "Cannot enable device after suspend\n");
4618 		return ret;
4619 	}
4620 
4621 	pf = pci_get_drvdata(pdev);
4622 	hw = &pf->hw;
4623 
4624 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4625 	ice_print_wake_reason(pf);
4626 
4627 	/* We cleared the interrupt scheme when we suspended, so we need to
4628 	 * restore it now to resume device functionality.
4629 	 */
4630 	ret = ice_reinit_interrupt_scheme(pf);
4631 	if (ret)
4632 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4633 
4634 	clear_bit(ICE_DOWN, pf->state);
4635 	/* Now perform PF reset and rebuild */
4636 	reset_type = ICE_RESET_PFR;
4637 	/* re-enable service task for reset, but allow reset to schedule it */
4638 	clear_bit(ICE_SERVICE_DIS, pf->state);
4639 
4640 	if (ice_schedule_reset(pf, reset_type))
4641 		dev_err(dev, "Reset during resume failed.\n");
4642 
4643 	clear_bit(ICE_SUSPENDED, pf->state);
4644 	ice_service_task_restart(pf);
4645 
4646 	/* Restart the service task */
4647 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4648 
4649 	return 0;
4650 }
4651 #endif /* CONFIG_PM */
4652 
4653 /**
4654  * ice_pci_err_detected - warning that PCI error has been detected
4655  * @pdev: PCI device information struct
4656  * @err: the type of PCI error
4657  *
4658  * Called to warn that something happened on the PCI bus and the error handling
4659  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4660  */
4661 static pci_ers_result_t
4662 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4663 {
4664 	struct ice_pf *pf = pci_get_drvdata(pdev);
4665 
4666 	if (!pf) {
4667 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4668 			__func__, err);
4669 		return PCI_ERS_RESULT_DISCONNECT;
4670 	}
4671 
4672 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4673 		ice_service_task_stop(pf);
4674 
4675 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4676 			set_bit(ICE_PFR_REQ, pf->state);
4677 			ice_prepare_for_reset(pf);
4678 		}
4679 	}
4680 
4681 	return PCI_ERS_RESULT_NEED_RESET;
4682 }
4683 
4684 /**
4685  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4686  * @pdev: PCI device information struct
4687  *
4688  * Called to determine if the driver can recover from the PCI slot reset by
4689  * using a register read to determine if the device is recoverable.
4690  */
4691 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4692 {
4693 	struct ice_pf *pf = pci_get_drvdata(pdev);
4694 	pci_ers_result_t result;
4695 	int err;
4696 	u32 reg;
4697 
4698 	err = pci_enable_device_mem(pdev);
4699 	if (err) {
4700 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4701 			err);
4702 		result = PCI_ERS_RESULT_DISCONNECT;
4703 	} else {
4704 		pci_set_master(pdev);
4705 		pci_restore_state(pdev);
4706 		pci_save_state(pdev);
4707 		pci_wake_from_d3(pdev, false);
4708 
4709 		/* Check for life */
4710 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4711 		if (!reg)
4712 			result = PCI_ERS_RESULT_RECOVERED;
4713 		else
4714 			result = PCI_ERS_RESULT_DISCONNECT;
4715 	}
4716 
4717 	err = pci_aer_clear_nonfatal_status(pdev);
4718 	if (err)
4719 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4720 			err);
4721 		/* non-fatal, continue */
4722 
4723 	return result;
4724 }
4725 
4726 /**
4727  * ice_pci_err_resume - restart operations after PCI error recovery
4728  * @pdev: PCI device information struct
4729  *
4730  * Called to allow the driver to bring things back up after PCI error and/or
4731  * reset recovery have finished
4732  */
4733 static void ice_pci_err_resume(struct pci_dev *pdev)
4734 {
4735 	struct ice_pf *pf = pci_get_drvdata(pdev);
4736 
4737 	if (!pf) {
4738 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4739 			__func__);
4740 		return;
4741 	}
4742 
4743 	if (test_bit(ICE_SUSPENDED, pf->state)) {
4744 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4745 			__func__);
4746 		return;
4747 	}
4748 
4749 	ice_restore_all_vfs_msi_state(pdev);
4750 
4751 	ice_do_reset(pf, ICE_RESET_PFR);
4752 	ice_service_task_restart(pf);
4753 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4754 }
4755 
4756 /**
4757  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4758  * @pdev: PCI device information struct
4759  */
4760 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4761 {
4762 	struct ice_pf *pf = pci_get_drvdata(pdev);
4763 
4764 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4765 		ice_service_task_stop(pf);
4766 
4767 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4768 			set_bit(ICE_PFR_REQ, pf->state);
4769 			ice_prepare_for_reset(pf);
4770 		}
4771 	}
4772 }
4773 
4774 /**
4775  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
4776  * @pdev: PCI device information struct
4777  */
4778 static void ice_pci_err_reset_done(struct pci_dev *pdev)
4779 {
4780 	ice_pci_err_resume(pdev);
4781 }
4782 
4783 /* ice_pci_tbl - PCI Device ID Table
4784  *
4785  * Wildcard entries (PCI_ANY_ID) should come last
4786  * Last entry must be all 0s
4787  *
4788  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
4789  *   Class, Class Mask, private data (not used) }
4790  */
4791 static const struct pci_device_id ice_pci_tbl[] = {
4792 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
4793 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
4794 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
4795 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
4796 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
4797 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
4798 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
4799 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
4800 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
4801 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
4802 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
4803 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
4804 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
4805 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
4806 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
4807 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
4808 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
4809 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
4810 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
4811 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
4812 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
4813 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
4814 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
4815 	/* required last entry */
4816 	{ 0, }
4817 };
4818 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
4819 
4820 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
4821 
4822 static const struct pci_error_handlers ice_pci_err_handler = {
4823 	.error_detected = ice_pci_err_detected,
4824 	.slot_reset = ice_pci_err_slot_reset,
4825 	.reset_prepare = ice_pci_err_reset_prepare,
4826 	.reset_done = ice_pci_err_reset_done,
4827 	.resume = ice_pci_err_resume
4828 };
4829 
4830 static struct pci_driver ice_driver = {
4831 	.name = KBUILD_MODNAME,
4832 	.id_table = ice_pci_tbl,
4833 	.probe = ice_probe,
4834 	.remove = ice_remove,
4835 #ifdef CONFIG_PM
4836 	.driver.pm = &ice_pm_ops,
4837 #endif /* CONFIG_PM */
4838 	.shutdown = ice_shutdown,
4839 	.sriov_configure = ice_sriov_configure,
4840 	.err_handler = &ice_pci_err_handler
4841 };
4842 
4843 /**
4844  * ice_module_init - Driver registration routine
4845  *
4846  * ice_module_init is the first routine called when the driver is
4847  * loaded. All it does is register with the PCI subsystem.
4848  */
4849 static int __init ice_module_init(void)
4850 {
4851 	int status;
4852 
4853 	pr_info("%s\n", ice_driver_string);
4854 	pr_info("%s\n", ice_copyright);
4855 
4856 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
4857 	if (!ice_wq) {
4858 		pr_err("Failed to create workqueue\n");
4859 		return -ENOMEM;
4860 	}
4861 
4862 	status = pci_register_driver(&ice_driver);
4863 	if (status) {
4864 		pr_err("failed to register PCI driver, err %d\n", status);
4865 		destroy_workqueue(ice_wq);
4866 	}
4867 
4868 	return status;
4869 }
4870 module_init(ice_module_init);
4871 
4872 /**
4873  * ice_module_exit - Driver exit cleanup routine
4874  *
4875  * ice_module_exit is called just before the driver is removed
4876  * from memory.
4877  */
4878 static void __exit ice_module_exit(void)
4879 {
4880 	pci_unregister_driver(&ice_driver);
4881 	destroy_workqueue(ice_wq);
4882 	pr_info("module unloaded\n");
4883 }
4884 module_exit(ice_module_exit);
4885 
4886 /**
4887  * ice_set_mac_address - NDO callback to set MAC address
4888  * @netdev: network interface device structure
4889  * @pi: pointer to an address structure
4890  *
4891  * Returns 0 on success, negative on failure
4892  */
4893 static int ice_set_mac_address(struct net_device *netdev, void *pi)
4894 {
4895 	struct ice_netdev_priv *np = netdev_priv(netdev);
4896 	struct ice_vsi *vsi = np->vsi;
4897 	struct ice_pf *pf = vsi->back;
4898 	struct ice_hw *hw = &pf->hw;
4899 	struct sockaddr *addr = pi;
4900 	enum ice_status status;
4901 	u8 flags = 0;
4902 	int err = 0;
4903 	u8 *mac;
4904 
4905 	mac = (u8 *)addr->sa_data;
4906 
4907 	if (!is_valid_ether_addr(mac))
4908 		return -EADDRNOTAVAIL;
4909 
4910 	if (ether_addr_equal(netdev->dev_addr, mac)) {
4911 		netdev_warn(netdev, "already using mac %pM\n", mac);
4912 		return 0;
4913 	}
4914 
4915 	if (test_bit(ICE_DOWN, pf->state) ||
4916 	    ice_is_reset_in_progress(pf->state)) {
4917 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
4918 			   mac);
4919 		return -EBUSY;
4920 	}
4921 
4922 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
4923 	status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
4924 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
4925 		err = -EADDRNOTAVAIL;
4926 		goto err_update_filters;
4927 	}
4928 
4929 	/* Add filter for new MAC. If filter exists, return success */
4930 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
4931 	if (status == ICE_ERR_ALREADY_EXISTS) {
4932 		/* Although this MAC filter is already present in hardware it's
4933 		 * possible in some cases (e.g. bonding) that dev_addr was
4934 		 * modified outside of the driver and needs to be restored back
4935 		 * to this value.
4936 		 */
4937 		memcpy(netdev->dev_addr, mac, netdev->addr_len);
4938 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
4939 		return 0;
4940 	}
4941 
4942 	/* error if the new filter addition failed */
4943 	if (status)
4944 		err = -EADDRNOTAVAIL;
4945 
4946 err_update_filters:
4947 	if (err) {
4948 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
4949 			   mac);
4950 		return err;
4951 	}
4952 
4953 	/* change the netdev's MAC address */
4954 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
4955 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
4956 		   netdev->dev_addr);
4957 
4958 	/* write new MAC address to the firmware */
4959 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4960 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
4961 	if (status) {
4962 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
4963 			   mac, ice_stat_str(status));
4964 	}
4965 	return 0;
4966 }
4967 
4968 /**
4969  * ice_set_rx_mode - NDO callback to set the netdev filters
4970  * @netdev: network interface device structure
4971  */
4972 static void ice_set_rx_mode(struct net_device *netdev)
4973 {
4974 	struct ice_netdev_priv *np = netdev_priv(netdev);
4975 	struct ice_vsi *vsi = np->vsi;
4976 
4977 	if (!vsi)
4978 		return;
4979 
4980 	/* Set the flags to synchronize filters
4981 	 * ndo_set_rx_mode may be triggered even without a change in netdev
4982 	 * flags
4983 	 */
4984 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
4985 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
4986 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
4987 
4988 	/* schedule our worker thread which will take care of
4989 	 * applying the new filter changes
4990 	 */
4991 	ice_service_task_schedule(vsi->back);
4992 }
4993 
4994 /**
4995  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
4996  * @netdev: network interface device structure
4997  * @queue_index: Queue ID
4998  * @maxrate: maximum bandwidth in Mbps
4999  */
5000 static int
5001 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5002 {
5003 	struct ice_netdev_priv *np = netdev_priv(netdev);
5004 	struct ice_vsi *vsi = np->vsi;
5005 	enum ice_status status;
5006 	u16 q_handle;
5007 	u8 tc;
5008 
5009 	/* Validate maxrate requested is within permitted range */
5010 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5011 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5012 			   maxrate, queue_index);
5013 		return -EINVAL;
5014 	}
5015 
5016 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5017 	tc = ice_dcb_get_tc(vsi, queue_index);
5018 
5019 	/* Set BW back to default, when user set maxrate to 0 */
5020 	if (!maxrate)
5021 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5022 					       q_handle, ICE_MAX_BW);
5023 	else
5024 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5025 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5026 	if (status) {
5027 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5028 			   ice_stat_str(status));
5029 		return -EIO;
5030 	}
5031 
5032 	return 0;
5033 }
5034 
5035 /**
5036  * ice_fdb_add - add an entry to the hardware database
5037  * @ndm: the input from the stack
5038  * @tb: pointer to array of nladdr (unused)
5039  * @dev: the net device pointer
5040  * @addr: the MAC address entry being added
5041  * @vid: VLAN ID
5042  * @flags: instructions from stack about fdb operation
5043  * @extack: netlink extended ack
5044  */
5045 static int
5046 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5047 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5048 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5049 {
5050 	int err;
5051 
5052 	if (vid) {
5053 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5054 		return -EINVAL;
5055 	}
5056 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5057 		netdev_err(dev, "FDB only supports static addresses\n");
5058 		return -EINVAL;
5059 	}
5060 
5061 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5062 		err = dev_uc_add_excl(dev, addr);
5063 	else if (is_multicast_ether_addr(addr))
5064 		err = dev_mc_add_excl(dev, addr);
5065 	else
5066 		err = -EINVAL;
5067 
5068 	/* Only return duplicate errors if NLM_F_EXCL is set */
5069 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5070 		err = 0;
5071 
5072 	return err;
5073 }
5074 
5075 /**
5076  * ice_fdb_del - delete an entry from the hardware database
5077  * @ndm: the input from the stack
5078  * @tb: pointer to array of nladdr (unused)
5079  * @dev: the net device pointer
5080  * @addr: the MAC address entry being added
5081  * @vid: VLAN ID
5082  */
5083 static int
5084 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5085 	    struct net_device *dev, const unsigned char *addr,
5086 	    __always_unused u16 vid)
5087 {
5088 	int err;
5089 
5090 	if (ndm->ndm_state & NUD_PERMANENT) {
5091 		netdev_err(dev, "FDB only supports static addresses\n");
5092 		return -EINVAL;
5093 	}
5094 
5095 	if (is_unicast_ether_addr(addr))
5096 		err = dev_uc_del(dev, addr);
5097 	else if (is_multicast_ether_addr(addr))
5098 		err = dev_mc_del(dev, addr);
5099 	else
5100 		err = -EINVAL;
5101 
5102 	return err;
5103 }
5104 
5105 /**
5106  * ice_set_features - set the netdev feature flags
5107  * @netdev: ptr to the netdev being adjusted
5108  * @features: the feature set that the stack is suggesting
5109  */
5110 static int
5111 ice_set_features(struct net_device *netdev, netdev_features_t features)
5112 {
5113 	struct ice_netdev_priv *np = netdev_priv(netdev);
5114 	struct ice_vsi *vsi = np->vsi;
5115 	struct ice_pf *pf = vsi->back;
5116 	int ret = 0;
5117 
5118 	/* Don't set any netdev advanced features with device in Safe Mode */
5119 	if (ice_is_safe_mode(vsi->back)) {
5120 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5121 		return ret;
5122 	}
5123 
5124 	/* Do not change setting during reset */
5125 	if (ice_is_reset_in_progress(pf->state)) {
5126 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5127 		return -EBUSY;
5128 	}
5129 
5130 	/* Multiple features can be changed in one call so keep features in
5131 	 * separate if/else statements to guarantee each feature is checked
5132 	 */
5133 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5134 		ice_vsi_manage_rss_lut(vsi, true);
5135 	else if (!(features & NETIF_F_RXHASH) &&
5136 		 netdev->features & NETIF_F_RXHASH)
5137 		ice_vsi_manage_rss_lut(vsi, false);
5138 
5139 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5140 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5141 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5142 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5143 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5144 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5145 
5146 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5147 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5148 		ret = ice_vsi_manage_vlan_insertion(vsi);
5149 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5150 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5151 		ret = ice_vsi_manage_vlan_insertion(vsi);
5152 
5153 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5154 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5155 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5156 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5157 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5158 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5159 
5160 	if ((features & NETIF_F_NTUPLE) &&
5161 	    !(netdev->features & NETIF_F_NTUPLE)) {
5162 		ice_vsi_manage_fdir(vsi, true);
5163 		ice_init_arfs(vsi);
5164 	} else if (!(features & NETIF_F_NTUPLE) &&
5165 		 (netdev->features & NETIF_F_NTUPLE)) {
5166 		ice_vsi_manage_fdir(vsi, false);
5167 		ice_clear_arfs(vsi);
5168 	}
5169 
5170 	return ret;
5171 }
5172 
5173 /**
5174  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5175  * @vsi: VSI to setup VLAN properties for
5176  */
5177 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5178 {
5179 	int ret = 0;
5180 
5181 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5182 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5183 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5184 		ret = ice_vsi_manage_vlan_insertion(vsi);
5185 
5186 	return ret;
5187 }
5188 
5189 /**
5190  * ice_vsi_cfg - Setup the VSI
5191  * @vsi: the VSI being configured
5192  *
5193  * Return 0 on success and negative value on error
5194  */
5195 int ice_vsi_cfg(struct ice_vsi *vsi)
5196 {
5197 	int err;
5198 
5199 	if (vsi->netdev) {
5200 		ice_set_rx_mode(vsi->netdev);
5201 
5202 		err = ice_vsi_vlan_setup(vsi);
5203 
5204 		if (err)
5205 			return err;
5206 	}
5207 	ice_vsi_cfg_dcb_rings(vsi);
5208 
5209 	err = ice_vsi_cfg_lan_txqs(vsi);
5210 	if (!err && ice_is_xdp_ena_vsi(vsi))
5211 		err = ice_vsi_cfg_xdp_txqs(vsi);
5212 	if (!err)
5213 		err = ice_vsi_cfg_rxqs(vsi);
5214 
5215 	return err;
5216 }
5217 
5218 /* THEORY OF MODERATION:
5219  * The below code creates custom DIM profiles for use by this driver, because
5220  * the ice driver hardware works differently than the hardware that DIMLIB was
5221  * originally made for. ice hardware doesn't have packet count limits that
5222  * can trigger an interrupt, but it *does* have interrupt rate limit support,
5223  * and this code adds that capability to be used by the driver when it's using
5224  * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
5225  * for how to "respond" to traffic and interrupts, so this driver uses a
5226  * slightly different set of moderation parameters to get best performance.
5227  */
5228 struct ice_dim {
5229 	/* the throttle rate for interrupts, basically worst case delay before
5230 	 * an initial interrupt fires, value is stored in microseconds.
5231 	 */
5232 	u16 itr;
5233 	/* the rate limit for interrupts, which can cap a delay from a small
5234 	 * ITR at a certain amount of interrupts per second. f.e. a 2us ITR
5235 	 * could yield as much as 500,000 interrupts per second, but with a
5236 	 * 10us rate limit, it limits to 100,000 interrupts per second. Value
5237 	 * is stored in microseconds.
5238 	 */
5239 	u16 intrl;
5240 };
5241 
5242 /* Make a different profile for Rx that doesn't allow quite so aggressive
5243  * moderation at the high end (it maxes out at 128us or about 8k interrupts a
5244  * second. The INTRL/rate parameters here are only useful to cap small ITR
5245  * values, which is why for larger ITR's - like 128, which can only generate
5246  * 8k interrupts per second, there is no point to rate limit and the values
5247  * are set to zero. The rate limit values do affect latency, and so must
5248  * be reasonably small so to not impact latency sensitive tests.
5249  */
5250 static const struct ice_dim rx_profile[] = {
5251 	{2, 10},
5252 	{8, 16},
5253 	{32, 0},
5254 	{96, 0},
5255 	{128, 0}
5256 };
5257 
5258 /* The transmit profile, which has the same sorts of values
5259  * as the previous struct
5260  */
5261 static const struct ice_dim tx_profile[] = {
5262 	{2, 10},
5263 	{8, 16},
5264 	{64, 0},
5265 	{128, 0},
5266 	{256, 0}
5267 };
5268 
5269 static void ice_tx_dim_work(struct work_struct *work)
5270 {
5271 	struct ice_ring_container *rc;
5272 	struct ice_q_vector *q_vector;
5273 	struct dim *dim;
5274 	u16 itr, intrl;
5275 
5276 	dim = container_of(work, struct dim, work);
5277 	rc = container_of(dim, struct ice_ring_container, dim);
5278 	q_vector = container_of(rc, struct ice_q_vector, tx);
5279 
5280 	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5281 		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5282 
5283 	/* look up the values in our local table */
5284 	itr = tx_profile[dim->profile_ix].itr;
5285 	intrl = tx_profile[dim->profile_ix].intrl;
5286 
5287 	ice_write_itr(rc, itr);
5288 	ice_write_intrl(q_vector, intrl);
5289 
5290 	dim->state = DIM_START_MEASURE;
5291 }
5292 
5293 static void ice_rx_dim_work(struct work_struct *work)
5294 {
5295 	struct ice_ring_container *rc;
5296 	struct ice_q_vector *q_vector;
5297 	struct dim *dim;
5298 	u16 itr, intrl;
5299 
5300 	dim = container_of(work, struct dim, work);
5301 	rc = container_of(dim, struct ice_ring_container, dim);
5302 	q_vector = container_of(rc, struct ice_q_vector, rx);
5303 
5304 	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5305 		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5306 
5307 	/* look up the values in our local table */
5308 	itr = rx_profile[dim->profile_ix].itr;
5309 	intrl = rx_profile[dim->profile_ix].intrl;
5310 
5311 	ice_write_itr(rc, itr);
5312 	ice_write_intrl(q_vector, intrl);
5313 
5314 	dim->state = DIM_START_MEASURE;
5315 }
5316 
5317 /**
5318  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5319  * @vsi: the VSI being configured
5320  */
5321 static void ice_napi_enable_all(struct ice_vsi *vsi)
5322 {
5323 	int q_idx;
5324 
5325 	if (!vsi->netdev)
5326 		return;
5327 
5328 	ice_for_each_q_vector(vsi, q_idx) {
5329 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5330 
5331 		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5332 		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5333 
5334 		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5335 		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5336 
5337 		if (q_vector->rx.ring || q_vector->tx.ring)
5338 			napi_enable(&q_vector->napi);
5339 	}
5340 }
5341 
5342 /**
5343  * ice_up_complete - Finish the last steps of bringing up a connection
5344  * @vsi: The VSI being configured
5345  *
5346  * Return 0 on success and negative value on error
5347  */
5348 static int ice_up_complete(struct ice_vsi *vsi)
5349 {
5350 	struct ice_pf *pf = vsi->back;
5351 	int err;
5352 
5353 	ice_vsi_cfg_msix(vsi);
5354 
5355 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5356 	 * Tx queue group list was configured and the context bits were
5357 	 * programmed using ice_vsi_cfg_txqs
5358 	 */
5359 	err = ice_vsi_start_all_rx_rings(vsi);
5360 	if (err)
5361 		return err;
5362 
5363 	clear_bit(ICE_VSI_DOWN, vsi->state);
5364 	ice_napi_enable_all(vsi);
5365 	ice_vsi_ena_irq(vsi);
5366 
5367 	if (vsi->port_info &&
5368 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5369 	    vsi->netdev) {
5370 		ice_print_link_msg(vsi, true);
5371 		netif_tx_start_all_queues(vsi->netdev);
5372 		netif_carrier_on(vsi->netdev);
5373 	}
5374 
5375 	ice_service_task_schedule(pf);
5376 
5377 	return 0;
5378 }
5379 
5380 /**
5381  * ice_up - Bring the connection back up after being down
5382  * @vsi: VSI being configured
5383  */
5384 int ice_up(struct ice_vsi *vsi)
5385 {
5386 	int err;
5387 
5388 	err = ice_vsi_cfg(vsi);
5389 	if (!err)
5390 		err = ice_up_complete(vsi);
5391 
5392 	return err;
5393 }
5394 
5395 /**
5396  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5397  * @ring: Tx or Rx ring to read stats from
5398  * @pkts: packets stats counter
5399  * @bytes: bytes stats counter
5400  *
5401  * This function fetches stats from the ring considering the atomic operations
5402  * that needs to be performed to read u64 values in 32 bit machine.
5403  */
5404 static void
5405 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5406 {
5407 	unsigned int start;
5408 	*pkts = 0;
5409 	*bytes = 0;
5410 
5411 	if (!ring)
5412 		return;
5413 	do {
5414 		start = u64_stats_fetch_begin_irq(&ring->syncp);
5415 		*pkts = ring->stats.pkts;
5416 		*bytes = ring->stats.bytes;
5417 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5418 }
5419 
5420 /**
5421  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5422  * @vsi: the VSI to be updated
5423  * @rings: rings to work on
5424  * @count: number of rings
5425  */
5426 static void
5427 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5428 			     u16 count)
5429 {
5430 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5431 	u16 i;
5432 
5433 	for (i = 0; i < count; i++) {
5434 		struct ice_ring *ring;
5435 		u64 pkts, bytes;
5436 
5437 		ring = READ_ONCE(rings[i]);
5438 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5439 		vsi_stats->tx_packets += pkts;
5440 		vsi_stats->tx_bytes += bytes;
5441 		vsi->tx_restart += ring->tx_stats.restart_q;
5442 		vsi->tx_busy += ring->tx_stats.tx_busy;
5443 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5444 	}
5445 }
5446 
5447 /**
5448  * ice_update_vsi_ring_stats - Update VSI stats counters
5449  * @vsi: the VSI to be updated
5450  */
5451 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5452 {
5453 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5454 	struct ice_ring *ring;
5455 	u64 pkts, bytes;
5456 	int i;
5457 
5458 	/* reset netdev stats */
5459 	vsi_stats->tx_packets = 0;
5460 	vsi_stats->tx_bytes = 0;
5461 	vsi_stats->rx_packets = 0;
5462 	vsi_stats->rx_bytes = 0;
5463 
5464 	/* reset non-netdev (extended) stats */
5465 	vsi->tx_restart = 0;
5466 	vsi->tx_busy = 0;
5467 	vsi->tx_linearize = 0;
5468 	vsi->rx_buf_failed = 0;
5469 	vsi->rx_page_failed = 0;
5470 
5471 	rcu_read_lock();
5472 
5473 	/* update Tx rings counters */
5474 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5475 
5476 	/* update Rx rings counters */
5477 	ice_for_each_rxq(vsi, i) {
5478 		ring = READ_ONCE(vsi->rx_rings[i]);
5479 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5480 		vsi_stats->rx_packets += pkts;
5481 		vsi_stats->rx_bytes += bytes;
5482 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5483 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5484 	}
5485 
5486 	/* update XDP Tx rings counters */
5487 	if (ice_is_xdp_ena_vsi(vsi))
5488 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5489 					     vsi->num_xdp_txq);
5490 
5491 	rcu_read_unlock();
5492 }
5493 
5494 /**
5495  * ice_update_vsi_stats - Update VSI stats counters
5496  * @vsi: the VSI to be updated
5497  */
5498 void ice_update_vsi_stats(struct ice_vsi *vsi)
5499 {
5500 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5501 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5502 	struct ice_pf *pf = vsi->back;
5503 
5504 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5505 	    test_bit(ICE_CFG_BUSY, pf->state))
5506 		return;
5507 
5508 	/* get stats as recorded by Tx/Rx rings */
5509 	ice_update_vsi_ring_stats(vsi);
5510 
5511 	/* get VSI stats as recorded by the hardware */
5512 	ice_update_eth_stats(vsi);
5513 
5514 	cur_ns->tx_errors = cur_es->tx_errors;
5515 	cur_ns->rx_dropped = cur_es->rx_discards;
5516 	cur_ns->tx_dropped = cur_es->tx_discards;
5517 	cur_ns->multicast = cur_es->rx_multicast;
5518 
5519 	/* update some more netdev stats if this is main VSI */
5520 	if (vsi->type == ICE_VSI_PF) {
5521 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5522 		cur_ns->rx_errors = pf->stats.crc_errors +
5523 				    pf->stats.illegal_bytes +
5524 				    pf->stats.rx_len_errors +
5525 				    pf->stats.rx_undersize +
5526 				    pf->hw_csum_rx_error +
5527 				    pf->stats.rx_jabber +
5528 				    pf->stats.rx_fragments +
5529 				    pf->stats.rx_oversize;
5530 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5531 		/* record drops from the port level */
5532 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5533 	}
5534 }
5535 
5536 /**
5537  * ice_update_pf_stats - Update PF port stats counters
5538  * @pf: PF whose stats needs to be updated
5539  */
5540 void ice_update_pf_stats(struct ice_pf *pf)
5541 {
5542 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5543 	struct ice_hw *hw = &pf->hw;
5544 	u16 fd_ctr_base;
5545 	u8 port;
5546 
5547 	port = hw->port_info->lport;
5548 	prev_ps = &pf->stats_prev;
5549 	cur_ps = &pf->stats;
5550 
5551 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5552 			  &prev_ps->eth.rx_bytes,
5553 			  &cur_ps->eth.rx_bytes);
5554 
5555 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5556 			  &prev_ps->eth.rx_unicast,
5557 			  &cur_ps->eth.rx_unicast);
5558 
5559 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5560 			  &prev_ps->eth.rx_multicast,
5561 			  &cur_ps->eth.rx_multicast);
5562 
5563 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5564 			  &prev_ps->eth.rx_broadcast,
5565 			  &cur_ps->eth.rx_broadcast);
5566 
5567 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5568 			  &prev_ps->eth.rx_discards,
5569 			  &cur_ps->eth.rx_discards);
5570 
5571 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5572 			  &prev_ps->eth.tx_bytes,
5573 			  &cur_ps->eth.tx_bytes);
5574 
5575 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5576 			  &prev_ps->eth.tx_unicast,
5577 			  &cur_ps->eth.tx_unicast);
5578 
5579 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5580 			  &prev_ps->eth.tx_multicast,
5581 			  &cur_ps->eth.tx_multicast);
5582 
5583 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5584 			  &prev_ps->eth.tx_broadcast,
5585 			  &cur_ps->eth.tx_broadcast);
5586 
5587 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5588 			  &prev_ps->tx_dropped_link_down,
5589 			  &cur_ps->tx_dropped_link_down);
5590 
5591 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5592 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5593 
5594 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5595 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5596 
5597 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5598 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5599 
5600 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5601 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5602 
5603 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5604 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5605 
5606 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5607 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5608 
5609 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5610 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5611 
5612 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5613 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5614 
5615 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5616 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5617 
5618 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5619 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5620 
5621 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5622 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5623 
5624 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5625 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5626 
5627 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5628 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5629 
5630 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5631 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5632 
5633 	fd_ctr_base = hw->fd_ctr_base;
5634 
5635 	ice_stat_update40(hw,
5636 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5637 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5638 			  &cur_ps->fd_sb_match);
5639 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5640 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5641 
5642 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5643 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5644 
5645 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5646 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5647 
5648 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5649 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5650 
5651 	ice_update_dcb_stats(pf);
5652 
5653 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5654 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5655 
5656 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5657 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5658 
5659 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5660 			  &prev_ps->mac_local_faults,
5661 			  &cur_ps->mac_local_faults);
5662 
5663 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5664 			  &prev_ps->mac_remote_faults,
5665 			  &cur_ps->mac_remote_faults);
5666 
5667 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5668 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5669 
5670 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5671 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5672 
5673 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5674 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5675 
5676 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5677 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5678 
5679 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5680 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5681 
5682 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5683 
5684 	pf->stat_prev_loaded = true;
5685 }
5686 
5687 /**
5688  * ice_get_stats64 - get statistics for network device structure
5689  * @netdev: network interface device structure
5690  * @stats: main device statistics structure
5691  */
5692 static
5693 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5694 {
5695 	struct ice_netdev_priv *np = netdev_priv(netdev);
5696 	struct rtnl_link_stats64 *vsi_stats;
5697 	struct ice_vsi *vsi = np->vsi;
5698 
5699 	vsi_stats = &vsi->net_stats;
5700 
5701 	if (!vsi->num_txq || !vsi->num_rxq)
5702 		return;
5703 
5704 	/* netdev packet/byte stats come from ring counter. These are obtained
5705 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
5706 	 * But, only call the update routine and read the registers if VSI is
5707 	 * not down.
5708 	 */
5709 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
5710 		ice_update_vsi_ring_stats(vsi);
5711 	stats->tx_packets = vsi_stats->tx_packets;
5712 	stats->tx_bytes = vsi_stats->tx_bytes;
5713 	stats->rx_packets = vsi_stats->rx_packets;
5714 	stats->rx_bytes = vsi_stats->rx_bytes;
5715 
5716 	/* The rest of the stats can be read from the hardware but instead we
5717 	 * just return values that the watchdog task has already obtained from
5718 	 * the hardware.
5719 	 */
5720 	stats->multicast = vsi_stats->multicast;
5721 	stats->tx_errors = vsi_stats->tx_errors;
5722 	stats->tx_dropped = vsi_stats->tx_dropped;
5723 	stats->rx_errors = vsi_stats->rx_errors;
5724 	stats->rx_dropped = vsi_stats->rx_dropped;
5725 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5726 	stats->rx_length_errors = vsi_stats->rx_length_errors;
5727 }
5728 
5729 /**
5730  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5731  * @vsi: VSI having NAPI disabled
5732  */
5733 static void ice_napi_disable_all(struct ice_vsi *vsi)
5734 {
5735 	int q_idx;
5736 
5737 	if (!vsi->netdev)
5738 		return;
5739 
5740 	ice_for_each_q_vector(vsi, q_idx) {
5741 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5742 
5743 		if (q_vector->rx.ring || q_vector->tx.ring)
5744 			napi_disable(&q_vector->napi);
5745 
5746 		cancel_work_sync(&q_vector->tx.dim.work);
5747 		cancel_work_sync(&q_vector->rx.dim.work);
5748 	}
5749 }
5750 
5751 /**
5752  * ice_down - Shutdown the connection
5753  * @vsi: The VSI being stopped
5754  */
5755 int ice_down(struct ice_vsi *vsi)
5756 {
5757 	int i, tx_err, rx_err, link_err = 0;
5758 
5759 	/* Caller of this function is expected to set the
5760 	 * vsi->state ICE_DOWN bit
5761 	 */
5762 	if (vsi->netdev) {
5763 		netif_carrier_off(vsi->netdev);
5764 		netif_tx_disable(vsi->netdev);
5765 	}
5766 
5767 	ice_vsi_dis_irq(vsi);
5768 
5769 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
5770 	if (tx_err)
5771 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
5772 			   vsi->vsi_num, tx_err);
5773 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
5774 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
5775 		if (tx_err)
5776 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
5777 				   vsi->vsi_num, tx_err);
5778 	}
5779 
5780 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
5781 	if (rx_err)
5782 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
5783 			   vsi->vsi_num, rx_err);
5784 
5785 	ice_napi_disable_all(vsi);
5786 
5787 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5788 		link_err = ice_force_phys_link_state(vsi, false);
5789 		if (link_err)
5790 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5791 				   vsi->vsi_num, link_err);
5792 	}
5793 
5794 	ice_for_each_txq(vsi, i)
5795 		ice_clean_tx_ring(vsi->tx_rings[i]);
5796 
5797 	ice_for_each_rxq(vsi, i)
5798 		ice_clean_rx_ring(vsi->rx_rings[i]);
5799 
5800 	if (tx_err || rx_err || link_err) {
5801 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5802 			   vsi->vsi_num, vsi->vsw->sw_id);
5803 		return -EIO;
5804 	}
5805 
5806 	return 0;
5807 }
5808 
5809 /**
5810  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
5811  * @vsi: VSI having resources allocated
5812  *
5813  * Return 0 on success, negative on failure
5814  */
5815 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
5816 {
5817 	int i, err = 0;
5818 
5819 	if (!vsi->num_txq) {
5820 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
5821 			vsi->vsi_num);
5822 		return -EINVAL;
5823 	}
5824 
5825 	ice_for_each_txq(vsi, i) {
5826 		struct ice_ring *ring = vsi->tx_rings[i];
5827 
5828 		if (!ring)
5829 			return -EINVAL;
5830 
5831 		ring->netdev = vsi->netdev;
5832 		err = ice_setup_tx_ring(ring);
5833 		if (err)
5834 			break;
5835 	}
5836 
5837 	return err;
5838 }
5839 
5840 /**
5841  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
5842  * @vsi: VSI having resources allocated
5843  *
5844  * Return 0 on success, negative on failure
5845  */
5846 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
5847 {
5848 	int i, err = 0;
5849 
5850 	if (!vsi->num_rxq) {
5851 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
5852 			vsi->vsi_num);
5853 		return -EINVAL;
5854 	}
5855 
5856 	ice_for_each_rxq(vsi, i) {
5857 		struct ice_ring *ring = vsi->rx_rings[i];
5858 
5859 		if (!ring)
5860 			return -EINVAL;
5861 
5862 		ring->netdev = vsi->netdev;
5863 		err = ice_setup_rx_ring(ring);
5864 		if (err)
5865 			break;
5866 	}
5867 
5868 	return err;
5869 }
5870 
5871 /**
5872  * ice_vsi_open_ctrl - open control VSI for use
5873  * @vsi: the VSI to open
5874  *
5875  * Initialization of the Control VSI
5876  *
5877  * Returns 0 on success, negative value on error
5878  */
5879 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
5880 {
5881 	char int_name[ICE_INT_NAME_STR_LEN];
5882 	struct ice_pf *pf = vsi->back;
5883 	struct device *dev;
5884 	int err;
5885 
5886 	dev = ice_pf_to_dev(pf);
5887 	/* allocate descriptors */
5888 	err = ice_vsi_setup_tx_rings(vsi);
5889 	if (err)
5890 		goto err_setup_tx;
5891 
5892 	err = ice_vsi_setup_rx_rings(vsi);
5893 	if (err)
5894 		goto err_setup_rx;
5895 
5896 	err = ice_vsi_cfg(vsi);
5897 	if (err)
5898 		goto err_setup_rx;
5899 
5900 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
5901 		 dev_driver_string(dev), dev_name(dev));
5902 	err = ice_vsi_req_irq_msix(vsi, int_name);
5903 	if (err)
5904 		goto err_setup_rx;
5905 
5906 	ice_vsi_cfg_msix(vsi);
5907 
5908 	err = ice_vsi_start_all_rx_rings(vsi);
5909 	if (err)
5910 		goto err_up_complete;
5911 
5912 	clear_bit(ICE_VSI_DOWN, vsi->state);
5913 	ice_vsi_ena_irq(vsi);
5914 
5915 	return 0;
5916 
5917 err_up_complete:
5918 	ice_down(vsi);
5919 err_setup_rx:
5920 	ice_vsi_free_rx_rings(vsi);
5921 err_setup_tx:
5922 	ice_vsi_free_tx_rings(vsi);
5923 
5924 	return err;
5925 }
5926 
5927 /**
5928  * ice_vsi_open - Called when a network interface is made active
5929  * @vsi: the VSI to open
5930  *
5931  * Initialization of the VSI
5932  *
5933  * Returns 0 on success, negative value on error
5934  */
5935 static int ice_vsi_open(struct ice_vsi *vsi)
5936 {
5937 	char int_name[ICE_INT_NAME_STR_LEN];
5938 	struct ice_pf *pf = vsi->back;
5939 	int err;
5940 
5941 	/* allocate descriptors */
5942 	err = ice_vsi_setup_tx_rings(vsi);
5943 	if (err)
5944 		goto err_setup_tx;
5945 
5946 	err = ice_vsi_setup_rx_rings(vsi);
5947 	if (err)
5948 		goto err_setup_rx;
5949 
5950 	err = ice_vsi_cfg(vsi);
5951 	if (err)
5952 		goto err_setup_rx;
5953 
5954 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5955 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
5956 	err = ice_vsi_req_irq_msix(vsi, int_name);
5957 	if (err)
5958 		goto err_setup_rx;
5959 
5960 	/* Notify the stack of the actual queue counts. */
5961 	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
5962 	if (err)
5963 		goto err_set_qs;
5964 
5965 	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
5966 	if (err)
5967 		goto err_set_qs;
5968 
5969 	err = ice_up_complete(vsi);
5970 	if (err)
5971 		goto err_up_complete;
5972 
5973 	return 0;
5974 
5975 err_up_complete:
5976 	ice_down(vsi);
5977 err_set_qs:
5978 	ice_vsi_free_irq(vsi);
5979 err_setup_rx:
5980 	ice_vsi_free_rx_rings(vsi);
5981 err_setup_tx:
5982 	ice_vsi_free_tx_rings(vsi);
5983 
5984 	return err;
5985 }
5986 
5987 /**
5988  * ice_vsi_release_all - Delete all VSIs
5989  * @pf: PF from which all VSIs are being removed
5990  */
5991 static void ice_vsi_release_all(struct ice_pf *pf)
5992 {
5993 	int err, i;
5994 
5995 	if (!pf->vsi)
5996 		return;
5997 
5998 	ice_for_each_vsi(pf, i) {
5999 		if (!pf->vsi[i])
6000 			continue;
6001 
6002 		err = ice_vsi_release(pf->vsi[i]);
6003 		if (err)
6004 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6005 				i, err, pf->vsi[i]->vsi_num);
6006 	}
6007 }
6008 
6009 /**
6010  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6011  * @pf: pointer to the PF instance
6012  * @type: VSI type to rebuild
6013  *
6014  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6015  */
6016 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6017 {
6018 	struct device *dev = ice_pf_to_dev(pf);
6019 	enum ice_status status;
6020 	int i, err;
6021 
6022 	ice_for_each_vsi(pf, i) {
6023 		struct ice_vsi *vsi = pf->vsi[i];
6024 
6025 		if (!vsi || vsi->type != type)
6026 			continue;
6027 
6028 		/* rebuild the VSI */
6029 		err = ice_vsi_rebuild(vsi, true);
6030 		if (err) {
6031 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6032 				err, vsi->idx, ice_vsi_type_str(type));
6033 			return err;
6034 		}
6035 
6036 		/* replay filters for the VSI */
6037 		status = ice_replay_vsi(&pf->hw, vsi->idx);
6038 		if (status) {
6039 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6040 				ice_stat_str(status), vsi->idx,
6041 				ice_vsi_type_str(type));
6042 			return -EIO;
6043 		}
6044 
6045 		/* Re-map HW VSI number, using VSI handle that has been
6046 		 * previously validated in ice_replay_vsi() call above
6047 		 */
6048 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6049 
6050 		/* enable the VSI */
6051 		err = ice_ena_vsi(vsi, false);
6052 		if (err) {
6053 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6054 				err, vsi->idx, ice_vsi_type_str(type));
6055 			return err;
6056 		}
6057 
6058 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6059 			 ice_vsi_type_str(type));
6060 	}
6061 
6062 	return 0;
6063 }
6064 
6065 /**
6066  * ice_update_pf_netdev_link - Update PF netdev link status
6067  * @pf: pointer to the PF instance
6068  */
6069 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6070 {
6071 	bool link_up;
6072 	int i;
6073 
6074 	ice_for_each_vsi(pf, i) {
6075 		struct ice_vsi *vsi = pf->vsi[i];
6076 
6077 		if (!vsi || vsi->type != ICE_VSI_PF)
6078 			return;
6079 
6080 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6081 		if (link_up) {
6082 			netif_carrier_on(pf->vsi[i]->netdev);
6083 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6084 		} else {
6085 			netif_carrier_off(pf->vsi[i]->netdev);
6086 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6087 		}
6088 	}
6089 }
6090 
6091 /**
6092  * ice_rebuild - rebuild after reset
6093  * @pf: PF to rebuild
6094  * @reset_type: type of reset
6095  *
6096  * Do not rebuild VF VSI in this flow because that is already handled via
6097  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6098  * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
6099  * to reset/rebuild all the VF VSI twice.
6100  */
6101 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6102 {
6103 	struct device *dev = ice_pf_to_dev(pf);
6104 	struct ice_hw *hw = &pf->hw;
6105 	enum ice_status ret;
6106 	int err;
6107 
6108 	if (test_bit(ICE_DOWN, pf->state))
6109 		goto clear_recovery;
6110 
6111 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6112 
6113 	ret = ice_init_all_ctrlq(hw);
6114 	if (ret) {
6115 		dev_err(dev, "control queues init failed %s\n",
6116 			ice_stat_str(ret));
6117 		goto err_init_ctrlq;
6118 	}
6119 
6120 	/* if DDP was previously loaded successfully */
6121 	if (!ice_is_safe_mode(pf)) {
6122 		/* reload the SW DB of filter tables */
6123 		if (reset_type == ICE_RESET_PFR)
6124 			ice_fill_blk_tbls(hw);
6125 		else
6126 			/* Reload DDP Package after CORER/GLOBR reset */
6127 			ice_load_pkg(NULL, pf);
6128 	}
6129 
6130 	ret = ice_clear_pf_cfg(hw);
6131 	if (ret) {
6132 		dev_err(dev, "clear PF configuration failed %s\n",
6133 			ice_stat_str(ret));
6134 		goto err_init_ctrlq;
6135 	}
6136 
6137 	if (pf->first_sw->dflt_vsi_ena)
6138 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6139 	/* clear the default VSI configuration if it exists */
6140 	pf->first_sw->dflt_vsi = NULL;
6141 	pf->first_sw->dflt_vsi_ena = false;
6142 
6143 	ice_clear_pxe_mode(hw);
6144 
6145 	ret = ice_get_caps(hw);
6146 	if (ret) {
6147 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6148 		goto err_init_ctrlq;
6149 	}
6150 
6151 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6152 	if (ret) {
6153 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6154 		goto err_init_ctrlq;
6155 	}
6156 
6157 	err = ice_sched_init_port(hw->port_info);
6158 	if (err)
6159 		goto err_sched_init_port;
6160 
6161 	/* start misc vector */
6162 	err = ice_req_irq_msix_misc(pf);
6163 	if (err) {
6164 		dev_err(dev, "misc vector setup failed: %d\n", err);
6165 		goto err_sched_init_port;
6166 	}
6167 
6168 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6169 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6170 		if (!rd32(hw, PFQF_FD_SIZE)) {
6171 			u16 unused, guar, b_effort;
6172 
6173 			guar = hw->func_caps.fd_fltr_guar;
6174 			b_effort = hw->func_caps.fd_fltr_best_effort;
6175 
6176 			/* force guaranteed filter pool for PF */
6177 			ice_alloc_fd_guar_item(hw, &unused, guar);
6178 			/* force shared filter pool for PF */
6179 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6180 		}
6181 	}
6182 
6183 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6184 		ice_dcb_rebuild(pf);
6185 
6186 	/* rebuild PF VSI */
6187 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6188 	if (err) {
6189 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6190 		goto err_vsi_rebuild;
6191 	}
6192 
6193 	/* If Flow Director is active */
6194 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6195 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6196 		if (err) {
6197 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6198 			goto err_vsi_rebuild;
6199 		}
6200 
6201 		/* replay HW Flow Director recipes */
6202 		if (hw->fdir_prof)
6203 			ice_fdir_replay_flows(hw);
6204 
6205 		/* replay Flow Director filters */
6206 		ice_fdir_replay_fltrs(pf);
6207 
6208 		ice_rebuild_arfs(pf);
6209 	}
6210 
6211 	ice_update_pf_netdev_link(pf);
6212 
6213 	/* tell the firmware we are up */
6214 	ret = ice_send_version(pf);
6215 	if (ret) {
6216 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6217 			ice_stat_str(ret));
6218 		goto err_vsi_rebuild;
6219 	}
6220 
6221 	ice_replay_post(hw);
6222 
6223 	/* if we get here, reset flow is successful */
6224 	clear_bit(ICE_RESET_FAILED, pf->state);
6225 	return;
6226 
6227 err_vsi_rebuild:
6228 err_sched_init_port:
6229 	ice_sched_cleanup_all(hw);
6230 err_init_ctrlq:
6231 	ice_shutdown_all_ctrlq(hw);
6232 	set_bit(ICE_RESET_FAILED, pf->state);
6233 clear_recovery:
6234 	/* set this bit in PF state to control service task scheduling */
6235 	set_bit(ICE_NEEDS_RESTART, pf->state);
6236 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6237 }
6238 
6239 /**
6240  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6241  * @vsi: Pointer to VSI structure
6242  */
6243 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6244 {
6245 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6246 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6247 	else
6248 		return ICE_RXBUF_3072;
6249 }
6250 
6251 /**
6252  * ice_change_mtu - NDO callback to change the MTU
6253  * @netdev: network interface device structure
6254  * @new_mtu: new value for maximum frame size
6255  *
6256  * Returns 0 on success, negative on failure
6257  */
6258 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6259 {
6260 	struct ice_netdev_priv *np = netdev_priv(netdev);
6261 	struct ice_vsi *vsi = np->vsi;
6262 	struct ice_pf *pf = vsi->back;
6263 	u8 count = 0;
6264 
6265 	if (new_mtu == (int)netdev->mtu) {
6266 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6267 		return 0;
6268 	}
6269 
6270 	if (ice_is_xdp_ena_vsi(vsi)) {
6271 		int frame_size = ice_max_xdp_frame_size(vsi);
6272 
6273 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6274 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6275 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6276 			return -EINVAL;
6277 		}
6278 	}
6279 
6280 	/* if a reset is in progress, wait for some time for it to complete */
6281 	do {
6282 		if (ice_is_reset_in_progress(pf->state)) {
6283 			count++;
6284 			usleep_range(1000, 2000);
6285 		} else {
6286 			break;
6287 		}
6288 
6289 	} while (count < 100);
6290 
6291 	if (count == 100) {
6292 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6293 		return -EBUSY;
6294 	}
6295 
6296 	netdev->mtu = (unsigned int)new_mtu;
6297 
6298 	/* if VSI is up, bring it down and then back up */
6299 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6300 		int err;
6301 
6302 		err = ice_down(vsi);
6303 		if (err) {
6304 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6305 			return err;
6306 		}
6307 
6308 		err = ice_up(vsi);
6309 		if (err) {
6310 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6311 			return err;
6312 		}
6313 	}
6314 
6315 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6316 	return 0;
6317 }
6318 
6319 /**
6320  * ice_aq_str - convert AQ err code to a string
6321  * @aq_err: the AQ error code to convert
6322  */
6323 const char *ice_aq_str(enum ice_aq_err aq_err)
6324 {
6325 	switch (aq_err) {
6326 	case ICE_AQ_RC_OK:
6327 		return "OK";
6328 	case ICE_AQ_RC_EPERM:
6329 		return "ICE_AQ_RC_EPERM";
6330 	case ICE_AQ_RC_ENOENT:
6331 		return "ICE_AQ_RC_ENOENT";
6332 	case ICE_AQ_RC_ENOMEM:
6333 		return "ICE_AQ_RC_ENOMEM";
6334 	case ICE_AQ_RC_EBUSY:
6335 		return "ICE_AQ_RC_EBUSY";
6336 	case ICE_AQ_RC_EEXIST:
6337 		return "ICE_AQ_RC_EEXIST";
6338 	case ICE_AQ_RC_EINVAL:
6339 		return "ICE_AQ_RC_EINVAL";
6340 	case ICE_AQ_RC_ENOSPC:
6341 		return "ICE_AQ_RC_ENOSPC";
6342 	case ICE_AQ_RC_ENOSYS:
6343 		return "ICE_AQ_RC_ENOSYS";
6344 	case ICE_AQ_RC_EMODE:
6345 		return "ICE_AQ_RC_EMODE";
6346 	case ICE_AQ_RC_ENOSEC:
6347 		return "ICE_AQ_RC_ENOSEC";
6348 	case ICE_AQ_RC_EBADSIG:
6349 		return "ICE_AQ_RC_EBADSIG";
6350 	case ICE_AQ_RC_ESVN:
6351 		return "ICE_AQ_RC_ESVN";
6352 	case ICE_AQ_RC_EBADMAN:
6353 		return "ICE_AQ_RC_EBADMAN";
6354 	case ICE_AQ_RC_EBADBUF:
6355 		return "ICE_AQ_RC_EBADBUF";
6356 	}
6357 
6358 	return "ICE_AQ_RC_UNKNOWN";
6359 }
6360 
6361 /**
6362  * ice_stat_str - convert status err code to a string
6363  * @stat_err: the status error code to convert
6364  */
6365 const char *ice_stat_str(enum ice_status stat_err)
6366 {
6367 	switch (stat_err) {
6368 	case ICE_SUCCESS:
6369 		return "OK";
6370 	case ICE_ERR_PARAM:
6371 		return "ICE_ERR_PARAM";
6372 	case ICE_ERR_NOT_IMPL:
6373 		return "ICE_ERR_NOT_IMPL";
6374 	case ICE_ERR_NOT_READY:
6375 		return "ICE_ERR_NOT_READY";
6376 	case ICE_ERR_NOT_SUPPORTED:
6377 		return "ICE_ERR_NOT_SUPPORTED";
6378 	case ICE_ERR_BAD_PTR:
6379 		return "ICE_ERR_BAD_PTR";
6380 	case ICE_ERR_INVAL_SIZE:
6381 		return "ICE_ERR_INVAL_SIZE";
6382 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6383 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6384 	case ICE_ERR_RESET_FAILED:
6385 		return "ICE_ERR_RESET_FAILED";
6386 	case ICE_ERR_FW_API_VER:
6387 		return "ICE_ERR_FW_API_VER";
6388 	case ICE_ERR_NO_MEMORY:
6389 		return "ICE_ERR_NO_MEMORY";
6390 	case ICE_ERR_CFG:
6391 		return "ICE_ERR_CFG";
6392 	case ICE_ERR_OUT_OF_RANGE:
6393 		return "ICE_ERR_OUT_OF_RANGE";
6394 	case ICE_ERR_ALREADY_EXISTS:
6395 		return "ICE_ERR_ALREADY_EXISTS";
6396 	case ICE_ERR_NVM:
6397 		return "ICE_ERR_NVM";
6398 	case ICE_ERR_NVM_CHECKSUM:
6399 		return "ICE_ERR_NVM_CHECKSUM";
6400 	case ICE_ERR_BUF_TOO_SHORT:
6401 		return "ICE_ERR_BUF_TOO_SHORT";
6402 	case ICE_ERR_NVM_BLANK_MODE:
6403 		return "ICE_ERR_NVM_BLANK_MODE";
6404 	case ICE_ERR_IN_USE:
6405 		return "ICE_ERR_IN_USE";
6406 	case ICE_ERR_MAX_LIMIT:
6407 		return "ICE_ERR_MAX_LIMIT";
6408 	case ICE_ERR_RESET_ONGOING:
6409 		return "ICE_ERR_RESET_ONGOING";
6410 	case ICE_ERR_HW_TABLE:
6411 		return "ICE_ERR_HW_TABLE";
6412 	case ICE_ERR_DOES_NOT_EXIST:
6413 		return "ICE_ERR_DOES_NOT_EXIST";
6414 	case ICE_ERR_FW_DDP_MISMATCH:
6415 		return "ICE_ERR_FW_DDP_MISMATCH";
6416 	case ICE_ERR_AQ_ERROR:
6417 		return "ICE_ERR_AQ_ERROR";
6418 	case ICE_ERR_AQ_TIMEOUT:
6419 		return "ICE_ERR_AQ_TIMEOUT";
6420 	case ICE_ERR_AQ_FULL:
6421 		return "ICE_ERR_AQ_FULL";
6422 	case ICE_ERR_AQ_NO_WORK:
6423 		return "ICE_ERR_AQ_NO_WORK";
6424 	case ICE_ERR_AQ_EMPTY:
6425 		return "ICE_ERR_AQ_EMPTY";
6426 	case ICE_ERR_AQ_FW_CRITICAL:
6427 		return "ICE_ERR_AQ_FW_CRITICAL";
6428 	}
6429 
6430 	return "ICE_ERR_UNKNOWN";
6431 }
6432 
6433 /**
6434  * ice_set_rss_lut - Set RSS LUT
6435  * @vsi: Pointer to VSI structure
6436  * @lut: Lookup table
6437  * @lut_size: Lookup table size
6438  *
6439  * Returns 0 on success, negative on failure
6440  */
6441 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6442 {
6443 	struct ice_aq_get_set_rss_lut_params params = {};
6444 	struct ice_hw *hw = &vsi->back->hw;
6445 	enum ice_status status;
6446 
6447 	if (!lut)
6448 		return -EINVAL;
6449 
6450 	params.vsi_handle = vsi->idx;
6451 	params.lut_size = lut_size;
6452 	params.lut_type = vsi->rss_lut_type;
6453 	params.lut = lut;
6454 
6455 	status = ice_aq_set_rss_lut(hw, &params);
6456 	if (status) {
6457 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6458 			ice_stat_str(status),
6459 			ice_aq_str(hw->adminq.sq_last_status));
6460 		return -EIO;
6461 	}
6462 
6463 	return 0;
6464 }
6465 
6466 /**
6467  * ice_set_rss_key - Set RSS key
6468  * @vsi: Pointer to the VSI structure
6469  * @seed: RSS hash seed
6470  *
6471  * Returns 0 on success, negative on failure
6472  */
6473 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6474 {
6475 	struct ice_hw *hw = &vsi->back->hw;
6476 	enum ice_status status;
6477 
6478 	if (!seed)
6479 		return -EINVAL;
6480 
6481 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6482 	if (status) {
6483 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6484 			ice_stat_str(status),
6485 			ice_aq_str(hw->adminq.sq_last_status));
6486 		return -EIO;
6487 	}
6488 
6489 	return 0;
6490 }
6491 
6492 /**
6493  * ice_get_rss_lut - Get RSS LUT
6494  * @vsi: Pointer to VSI structure
6495  * @lut: Buffer to store the lookup table entries
6496  * @lut_size: Size of buffer to store the lookup table entries
6497  *
6498  * Returns 0 on success, negative on failure
6499  */
6500 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6501 {
6502 	struct ice_aq_get_set_rss_lut_params params = {};
6503 	struct ice_hw *hw = &vsi->back->hw;
6504 	enum ice_status status;
6505 
6506 	if (!lut)
6507 		return -EINVAL;
6508 
6509 	params.vsi_handle = vsi->idx;
6510 	params.lut_size = lut_size;
6511 	params.lut_type = vsi->rss_lut_type;
6512 	params.lut = lut;
6513 
6514 	status = ice_aq_get_rss_lut(hw, &params);
6515 	if (status) {
6516 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
6517 			ice_stat_str(status),
6518 			ice_aq_str(hw->adminq.sq_last_status));
6519 		return -EIO;
6520 	}
6521 
6522 	return 0;
6523 }
6524 
6525 /**
6526  * ice_get_rss_key - Get RSS key
6527  * @vsi: Pointer to VSI structure
6528  * @seed: Buffer to store the key in
6529  *
6530  * Returns 0 on success, negative on failure
6531  */
6532 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
6533 {
6534 	struct ice_hw *hw = &vsi->back->hw;
6535 	enum ice_status status;
6536 
6537 	if (!seed)
6538 		return -EINVAL;
6539 
6540 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6541 	if (status) {
6542 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
6543 			ice_stat_str(status),
6544 			ice_aq_str(hw->adminq.sq_last_status));
6545 		return -EIO;
6546 	}
6547 
6548 	return 0;
6549 }
6550 
6551 /**
6552  * ice_bridge_getlink - Get the hardware bridge mode
6553  * @skb: skb buff
6554  * @pid: process ID
6555  * @seq: RTNL message seq
6556  * @dev: the netdev being configured
6557  * @filter_mask: filter mask passed in
6558  * @nlflags: netlink flags passed in
6559  *
6560  * Return the bridge mode (VEB/VEPA)
6561  */
6562 static int
6563 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6564 		   struct net_device *dev, u32 filter_mask, int nlflags)
6565 {
6566 	struct ice_netdev_priv *np = netdev_priv(dev);
6567 	struct ice_vsi *vsi = np->vsi;
6568 	struct ice_pf *pf = vsi->back;
6569 	u16 bmode;
6570 
6571 	bmode = pf->first_sw->bridge_mode;
6572 
6573 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6574 				       filter_mask, NULL);
6575 }
6576 
6577 /**
6578  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6579  * @vsi: Pointer to VSI structure
6580  * @bmode: Hardware bridge mode (VEB/VEPA)
6581  *
6582  * Returns 0 on success, negative on failure
6583  */
6584 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6585 {
6586 	struct ice_aqc_vsi_props *vsi_props;
6587 	struct ice_hw *hw = &vsi->back->hw;
6588 	struct ice_vsi_ctx *ctxt;
6589 	enum ice_status status;
6590 	int ret = 0;
6591 
6592 	vsi_props = &vsi->info;
6593 
6594 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6595 	if (!ctxt)
6596 		return -ENOMEM;
6597 
6598 	ctxt->info = vsi->info;
6599 
6600 	if (bmode == BRIDGE_MODE_VEB)
6601 		/* change from VEPA to VEB mode */
6602 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6603 	else
6604 		/* change from VEB to VEPA mode */
6605 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6606 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6607 
6608 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6609 	if (status) {
6610 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6611 			bmode, ice_stat_str(status),
6612 			ice_aq_str(hw->adminq.sq_last_status));
6613 		ret = -EIO;
6614 		goto out;
6615 	}
6616 	/* Update sw flags for book keeping */
6617 	vsi_props->sw_flags = ctxt->info.sw_flags;
6618 
6619 out:
6620 	kfree(ctxt);
6621 	return ret;
6622 }
6623 
6624 /**
6625  * ice_bridge_setlink - Set the hardware bridge mode
6626  * @dev: the netdev being configured
6627  * @nlh: RTNL message
6628  * @flags: bridge setlink flags
6629  * @extack: netlink extended ack
6630  *
6631  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6632  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
6633  * not already set for all VSIs connected to this switch. And also update the
6634  * unicast switch filter rules for the corresponding switch of the netdev.
6635  */
6636 static int
6637 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6638 		   u16 __always_unused flags,
6639 		   struct netlink_ext_ack __always_unused *extack)
6640 {
6641 	struct ice_netdev_priv *np = netdev_priv(dev);
6642 	struct ice_pf *pf = np->vsi->back;
6643 	struct nlattr *attr, *br_spec;
6644 	struct ice_hw *hw = &pf->hw;
6645 	enum ice_status status;
6646 	struct ice_sw *pf_sw;
6647 	int rem, v, err = 0;
6648 
6649 	pf_sw = pf->first_sw;
6650 	/* find the attribute in the netlink message */
6651 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
6652 
6653 	nla_for_each_nested(attr, br_spec, rem) {
6654 		__u16 mode;
6655 
6656 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6657 			continue;
6658 		mode = nla_get_u16(attr);
6659 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6660 			return -EINVAL;
6661 		/* Continue  if bridge mode is not being flipped */
6662 		if (mode == pf_sw->bridge_mode)
6663 			continue;
6664 		/* Iterates through the PF VSI list and update the loopback
6665 		 * mode of the VSI
6666 		 */
6667 		ice_for_each_vsi(pf, v) {
6668 			if (!pf->vsi[v])
6669 				continue;
6670 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6671 			if (err)
6672 				return err;
6673 		}
6674 
6675 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6676 		/* Update the unicast switch filter rules for the corresponding
6677 		 * switch of the netdev
6678 		 */
6679 		status = ice_update_sw_rule_bridge_mode(hw);
6680 		if (status) {
6681 			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6682 				   mode, ice_stat_str(status),
6683 				   ice_aq_str(hw->adminq.sq_last_status));
6684 			/* revert hw->evb_veb */
6685 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6686 			return -EIO;
6687 		}
6688 
6689 		pf_sw->bridge_mode = mode;
6690 	}
6691 
6692 	return 0;
6693 }
6694 
6695 /**
6696  * ice_tx_timeout - Respond to a Tx Hang
6697  * @netdev: network interface device structure
6698  * @txqueue: Tx queue
6699  */
6700 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6701 {
6702 	struct ice_netdev_priv *np = netdev_priv(netdev);
6703 	struct ice_ring *tx_ring = NULL;
6704 	struct ice_vsi *vsi = np->vsi;
6705 	struct ice_pf *pf = vsi->back;
6706 	u32 i;
6707 
6708 	pf->tx_timeout_count++;
6709 
6710 	/* Check if PFC is enabled for the TC to which the queue belongs
6711 	 * to. If yes then Tx timeout is not caused by a hung queue, no
6712 	 * need to reset and rebuild
6713 	 */
6714 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6715 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6716 			 txqueue);
6717 		return;
6718 	}
6719 
6720 	/* now that we have an index, find the tx_ring struct */
6721 	for (i = 0; i < vsi->num_txq; i++)
6722 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
6723 			if (txqueue == vsi->tx_rings[i]->q_index) {
6724 				tx_ring = vsi->tx_rings[i];
6725 				break;
6726 			}
6727 
6728 	/* Reset recovery level if enough time has elapsed after last timeout.
6729 	 * Also ensure no new reset action happens before next timeout period.
6730 	 */
6731 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
6732 		pf->tx_timeout_recovery_level = 1;
6733 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
6734 				       netdev->watchdog_timeo)))
6735 		return;
6736 
6737 	if (tx_ring) {
6738 		struct ice_hw *hw = &pf->hw;
6739 		u32 head, val = 0;
6740 
6741 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
6742 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
6743 		/* Read interrupt register */
6744 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
6745 
6746 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
6747 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
6748 			    head, tx_ring->next_to_use, val);
6749 	}
6750 
6751 	pf->tx_timeout_last_recovery = jiffies;
6752 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
6753 		    pf->tx_timeout_recovery_level, txqueue);
6754 
6755 	switch (pf->tx_timeout_recovery_level) {
6756 	case 1:
6757 		set_bit(ICE_PFR_REQ, pf->state);
6758 		break;
6759 	case 2:
6760 		set_bit(ICE_CORER_REQ, pf->state);
6761 		break;
6762 	case 3:
6763 		set_bit(ICE_GLOBR_REQ, pf->state);
6764 		break;
6765 	default:
6766 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
6767 		set_bit(ICE_DOWN, pf->state);
6768 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
6769 		set_bit(ICE_SERVICE_DIS, pf->state);
6770 		break;
6771 	}
6772 
6773 	ice_service_task_schedule(pf);
6774 	pf->tx_timeout_recovery_level++;
6775 }
6776 
6777 /**
6778  * ice_open - Called when a network interface becomes active
6779  * @netdev: network interface device structure
6780  *
6781  * The open entry point is called when a network interface is made
6782  * active by the system (IFF_UP). At this point all resources needed
6783  * for transmit and receive operations are allocated, the interrupt
6784  * handler is registered with the OS, the netdev watchdog is enabled,
6785  * and the stack is notified that the interface is ready.
6786  *
6787  * Returns 0 on success, negative value on failure
6788  */
6789 int ice_open(struct net_device *netdev)
6790 {
6791 	struct ice_netdev_priv *np = netdev_priv(netdev);
6792 	struct ice_pf *pf = np->vsi->back;
6793 
6794 	if (ice_is_reset_in_progress(pf->state)) {
6795 		netdev_err(netdev, "can't open net device while reset is in progress");
6796 		return -EBUSY;
6797 	}
6798 
6799 	return ice_open_internal(netdev);
6800 }
6801 
6802 /**
6803  * ice_open_internal - Called when a network interface becomes active
6804  * @netdev: network interface device structure
6805  *
6806  * Internal ice_open implementation. Should not be used directly except for ice_open and reset
6807  * handling routine
6808  *
6809  * Returns 0 on success, negative value on failure
6810  */
6811 int ice_open_internal(struct net_device *netdev)
6812 {
6813 	struct ice_netdev_priv *np = netdev_priv(netdev);
6814 	struct ice_vsi *vsi = np->vsi;
6815 	struct ice_pf *pf = vsi->back;
6816 	struct ice_port_info *pi;
6817 	enum ice_status status;
6818 	int err;
6819 
6820 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
6821 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
6822 		return -EIO;
6823 	}
6824 
6825 	netif_carrier_off(netdev);
6826 
6827 	pi = vsi->port_info;
6828 	status = ice_update_link_info(pi);
6829 	if (status) {
6830 		netdev_err(netdev, "Failed to get link info, error %s\n",
6831 			   ice_stat_str(status));
6832 		return -EIO;
6833 	}
6834 
6835 	/* Set PHY if there is media, otherwise, turn off PHY */
6836 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
6837 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6838 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
6839 			err = ice_init_phy_user_cfg(pi);
6840 			if (err) {
6841 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
6842 					   err);
6843 				return err;
6844 			}
6845 		}
6846 
6847 		err = ice_configure_phy(vsi);
6848 		if (err) {
6849 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
6850 				   err);
6851 			return err;
6852 		}
6853 	} else {
6854 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6855 		ice_set_link(vsi, false);
6856 	}
6857 
6858 	err = ice_vsi_open(vsi);
6859 	if (err)
6860 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
6861 			   vsi->vsi_num, vsi->vsw->sw_id);
6862 
6863 	/* Update existing tunnels information */
6864 	udp_tunnel_get_rx_info(netdev);
6865 
6866 	return err;
6867 }
6868 
6869 /**
6870  * ice_stop - Disables a network interface
6871  * @netdev: network interface device structure
6872  *
6873  * The stop entry point is called when an interface is de-activated by the OS,
6874  * and the netdevice enters the DOWN state. The hardware is still under the
6875  * driver's control, but the netdev interface is disabled.
6876  *
6877  * Returns success only - not allowed to fail
6878  */
6879 int ice_stop(struct net_device *netdev)
6880 {
6881 	struct ice_netdev_priv *np = netdev_priv(netdev);
6882 	struct ice_vsi *vsi = np->vsi;
6883 	struct ice_pf *pf = vsi->back;
6884 
6885 	if (ice_is_reset_in_progress(pf->state)) {
6886 		netdev_err(netdev, "can't stop net device while reset is in progress");
6887 		return -EBUSY;
6888 	}
6889 
6890 	ice_vsi_close(vsi);
6891 
6892 	return 0;
6893 }
6894 
6895 /**
6896  * ice_features_check - Validate encapsulated packet conforms to limits
6897  * @skb: skb buffer
6898  * @netdev: This port's netdev
6899  * @features: Offload features that the stack believes apply
6900  */
6901 static netdev_features_t
6902 ice_features_check(struct sk_buff *skb,
6903 		   struct net_device __always_unused *netdev,
6904 		   netdev_features_t features)
6905 {
6906 	size_t len;
6907 
6908 	/* No point in doing any of this if neither checksum nor GSO are
6909 	 * being requested for this frame. We can rule out both by just
6910 	 * checking for CHECKSUM_PARTIAL
6911 	 */
6912 	if (skb->ip_summed != CHECKSUM_PARTIAL)
6913 		return features;
6914 
6915 	/* We cannot support GSO if the MSS is going to be less than
6916 	 * 64 bytes. If it is then we need to drop support for GSO.
6917 	 */
6918 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
6919 		features &= ~NETIF_F_GSO_MASK;
6920 
6921 	len = skb_network_header(skb) - skb->data;
6922 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
6923 		goto out_rm_features;
6924 
6925 	len = skb_transport_header(skb) - skb_network_header(skb);
6926 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6927 		goto out_rm_features;
6928 
6929 	if (skb->encapsulation) {
6930 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
6931 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
6932 			goto out_rm_features;
6933 
6934 		len = skb_inner_transport_header(skb) -
6935 		      skb_inner_network_header(skb);
6936 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6937 			goto out_rm_features;
6938 	}
6939 
6940 	return features;
6941 out_rm_features:
6942 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
6943 }
6944 
6945 static const struct net_device_ops ice_netdev_safe_mode_ops = {
6946 	.ndo_open = ice_open,
6947 	.ndo_stop = ice_stop,
6948 	.ndo_start_xmit = ice_start_xmit,
6949 	.ndo_set_mac_address = ice_set_mac_address,
6950 	.ndo_validate_addr = eth_validate_addr,
6951 	.ndo_change_mtu = ice_change_mtu,
6952 	.ndo_get_stats64 = ice_get_stats64,
6953 	.ndo_tx_timeout = ice_tx_timeout,
6954 	.ndo_bpf = ice_xdp_safe_mode,
6955 };
6956 
6957 static const struct net_device_ops ice_netdev_ops = {
6958 	.ndo_open = ice_open,
6959 	.ndo_stop = ice_stop,
6960 	.ndo_start_xmit = ice_start_xmit,
6961 	.ndo_features_check = ice_features_check,
6962 	.ndo_set_rx_mode = ice_set_rx_mode,
6963 	.ndo_set_mac_address = ice_set_mac_address,
6964 	.ndo_validate_addr = eth_validate_addr,
6965 	.ndo_change_mtu = ice_change_mtu,
6966 	.ndo_get_stats64 = ice_get_stats64,
6967 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
6968 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
6969 	.ndo_set_vf_mac = ice_set_vf_mac,
6970 	.ndo_get_vf_config = ice_get_vf_cfg,
6971 	.ndo_set_vf_trust = ice_set_vf_trust,
6972 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
6973 	.ndo_set_vf_link_state = ice_set_vf_link_state,
6974 	.ndo_get_vf_stats = ice_get_vf_stats,
6975 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
6976 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
6977 	.ndo_set_features = ice_set_features,
6978 	.ndo_bridge_getlink = ice_bridge_getlink,
6979 	.ndo_bridge_setlink = ice_bridge_setlink,
6980 	.ndo_fdb_add = ice_fdb_add,
6981 	.ndo_fdb_del = ice_fdb_del,
6982 #ifdef CONFIG_RFS_ACCEL
6983 	.ndo_rx_flow_steer = ice_rx_flow_steer,
6984 #endif
6985 	.ndo_tx_timeout = ice_tx_timeout,
6986 	.ndo_bpf = ice_xdp,
6987 	.ndo_xdp_xmit = ice_xdp_xmit,
6988 	.ndo_xsk_wakeup = ice_xsk_wakeup,
6989 };
6990