// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

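	/* The ring is circular: when tail has wrapped past the end of the
	 * ring, the count of pending descriptors must account for the
	 * wrap-around.
	 */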
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, don't treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in SW structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents that from happening and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* other reset types do not support channel rebuild,
			 * so reset the channel-related info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
	 * for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it, as indicated by ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set, so prepare for the reset now), poll for reset done, rebuild
	 * and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed; may be false if autoneg is not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
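	/* the ETS REC TLV reuses the typelen computed for the ETS CFG TLV
	 * above, since both TLVs share the same type and length
	 */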
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail on failure, as other bookkeeping is still needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

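	/* a set bit in the event mask suppresses the corresponding event, so
	 * invert to leave only the events of interest unmasked
	 */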
	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. The actual wait is done by a call to ice_aq_wait_for_event().
 *
 * The calls are separated to allow the caller to register for the event
 * before sending the command, which mitigates a race between registering
 * and the firmware responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
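 *
 * A typical caller pairs the two calls around issuing the command, e.g.
 * (illustrative sketch only; the opcode and descriptor are placeholders):
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);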
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
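	/* ret < 0 means the wait was interrupted by a signal; ret == 0 with
	 * the task still in the WAITING state means the timeout elapsed
	 * before the event arrived
	 */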
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, it copies the contents of the event into the task
 * structure and wakes up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			data.num_msg_proc = i;
			data.num_pending_arq = pending;
			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

			ice_vc_process_vf_msg(pf, &event, &data);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

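	/* report leftover work: a nonzero return leaves the caller's
	 * EVENT_PENDING bit set so the service task processes the queue again
	 */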
	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for the suspend and resume flows (e.g. the WoL
 * scenario).
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1850 vf->vf_id);
1851 }
1852
1853 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1854 if (reg & VP_MDET_RX_VALID_M) {
1855 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1856 vf->mdd_rx_events.count++;
1857 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1858 if (netif_msg_rx_err(pf))
1859 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1860 vf->vf_id);
1861
1862 /* Since the queue is disabled on VF Rx MDD events, the
1863 * PF can be configured to reset the VF through ethtool
1864 * private flag mdd-auto-reset-vf.
1865 */
1866 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1867 /* VF MDD event counters will be cleared by
1868 * reset, so print the event prior to reset.
1869 */
1870 ice_print_vf_rx_mdd_event(vf);
1871 ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1872 }
1873 }
1874 }
1875 mutex_unlock(&pf->vfs.table_lock);
1876
1877 ice_print_vfs_mdd_events(pf);
1878 }
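/* The VF auto-reset behavior above is controlled by the ethtool private
 * flag named in the kernel-doc, e.g. (hypothetical interface name):
 *
 *	ethtool --set-priv-flags eth0 mdd-auto-reset-vf on
 */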
1879
1880 /**
1881 * ice_force_phys_link_state - Force the physical link state
1882 * @vsi: VSI to force the physical link state to up/down
1883 * @link_up: true/false indicates to set the physical link to up/down
1884 *
1885 * Force the physical link state by getting the current PHY capabilities from
1886 * hardware and setting the PHY config based on the determined capabilities. If
1887 * the link changes, a link event will be triggered because both the Enable Automatic
1888 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1889 *
1890 * Returns 0 on success, negative on failure
1891 */
1892 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1893 {
1894 struct ice_aqc_get_phy_caps_data *pcaps;
1895 struct ice_aqc_set_phy_cfg_data *cfg;
1896 struct ice_port_info *pi;
1897 struct device *dev;
1898 int retcode;
1899
1900 if (!vsi || !vsi->port_info || !vsi->back)
1901 return -EINVAL;
1902 if (vsi->type != ICE_VSI_PF)
1903 return 0;
1904
1905 dev = ice_pf_to_dev(vsi->back);
1906
1907 pi = vsi->port_info;
1908
1909 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1910 if (!pcaps)
1911 return -ENOMEM;
1912
1913 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1914 NULL);
1915 if (retcode) {
1916 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1917 vsi->vsi_num, retcode);
1918 retcode = -EIO;
1919 goto out;
1920 }
1921
1922 /* No change in link */
1923 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1924 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1925 goto out;
1926
1927 /* Use the current user PHY configuration. The current user PHY
1928 * configuration is initialized during probe from PHY capabilities
1929 * software mode, and updated on set PHY configuration.
1930 */
1931 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1932 if (!cfg) {
1933 retcode = -ENOMEM;
1934 goto out;
1935 }
1936
1937 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1938 if (link_up)
1939 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1940 else
1941 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1942
1943 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1944 if (retcode) {
1945 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1946 vsi->vsi_num, retcode);
1947 retcode = -EIO;
1948 }
1949
1950 kfree(cfg);
1951 out:
1952 kfree(pcaps);
1953 return retcode;
1954 }
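/* Typical pairing (a sketch of the close/open flow): when the
 * link-down-on-close private flag is set, the close path forces the PHY
 * down and the open path raises it again:
 *
 *	ice_force_phys_link_state(vsi, false);	-- on close
 *	ice_force_phys_link_state(vsi, true);	-- on open, see
 *						   ice_configure_phy() below
 */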
1955
1956 /**
1957 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1958 * @pi: port info structure
1959 *
1960 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1961 */
1962 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1963 {
1964 struct ice_aqc_get_phy_caps_data *pcaps;
1965 struct ice_pf *pf = pi->hw->back;
1966 int err;
1967
1968 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1969 if (!pcaps)
1970 return -ENOMEM;
1971
1972 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1973 pcaps, NULL);
1974
1975 if (err) {
1976 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1977 goto out;
1978 }
1979
1980 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1981 pf->nvm_phy_type_lo = pcaps->phy_type_low;
1982
1983 out:
1984 kfree(pcaps);
1985 return err;
1986 }
1987
1988 /**
1989 * ice_init_link_dflt_override - Initialize link default override
1990 * @pi: port info structure
1991 *
1992 * Initialize link default override and PHY total port shutdown during probe
1993 */
1994 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1995 {
1996 struct ice_link_default_override_tlv *ldo;
1997 struct ice_pf *pf = pi->hw->back;
1998
1999 ldo = &pf->link_dflt_override;
2000 if (ice_get_link_default_override(ldo, pi))
2001 return;
2002
2003 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2004 return;
2005
2006 /* Enable Total Port Shutdown (override/replace link-down-on-close
2007 * ethtool private flag) for ports with Port Disable bit set.
2008 */
2009 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2010 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2011 }
2012
2013 /**
2014 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2015 * @pi: port info structure
2016 *
2017 * If default override is enabled, initialize the user PHY cfg speed and FEC
2018 * settings using the default override mask from the NVM.
2019 *
2020 * The PHY should only be configured with the default override settings the
2021 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2022 * is used to indicate that the user PHY cfg default override is initialized
2023 * and the PHY has not been configured with the default override settings. The
2024 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2025 * configured.
2026 *
2027 * This function should be called only if the FW doesn't support default
2028 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2029 */
2030 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2031 {
2032 struct ice_link_default_override_tlv *ldo;
2033 struct ice_aqc_set_phy_cfg_data *cfg;
2034 struct ice_phy_info *phy = &pi->phy;
2035 struct ice_pf *pf = pi->hw->back;
2036
2037 ldo = &pf->link_dflt_override;
2038
2039 /* If link default override is enabled, use it to mask the NVM PHY
2040 * capabilities for the speed and FEC default configuration.
2041 */
2042 cfg = &phy->curr_user_phy_cfg;
2043
2044 if (ldo->phy_type_low || ldo->phy_type_high) {
2045 cfg->phy_type_low = pf->nvm_phy_type_lo &
2046 cpu_to_le64(ldo->phy_type_low);
2047 cfg->phy_type_high = pf->nvm_phy_type_hi &
2048 cpu_to_le64(ldo->phy_type_high);
2049 }
2050 cfg->link_fec_opt = ldo->fec_options;
2051 phy->curr_user_fec_req = ICE_FEC_AUTO;
2052
2053 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2054 }
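/* Worked example with hypothetical values: if the NVM reports
 * nvm_phy_type_lo = 0x00F0 but the override TLV only allows
 * phy_type_low = 0x0030, the resulting user config is
 * 0x00F0 & 0x0030 = 0x0030 -- the override can restrict, but never
 * extend, the NVM-reported capabilities.
 */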
2055
2056 /**
2057 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2058 * @pi: port info structure
2059 *
2060 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2061 * mode to default. The PHY defaults are from get PHY capabilities topology
2062 * with media so call when media is first available. An error is returned if
2063 * called when media is not available. The PHY initialization completed state is
2064 * set here.
2065 *
2066 * These configurations are used when setting PHY
2067 * configuration. The user PHY configuration is updated on set PHY
2068 * configuration. Returns 0 on success, negative on failure
2069 */
2070 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2071 {
2072 struct ice_aqc_get_phy_caps_data *pcaps;
2073 struct ice_phy_info *phy = &pi->phy;
2074 struct ice_pf *pf = pi->hw->back;
2075 int err;
2076
2077 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2078 return -EIO;
2079
2080 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2081 if (!pcaps)
2082 return -ENOMEM;
2083
2084 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2085 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2086 pcaps, NULL);
2087 else
2088 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2089 pcaps, NULL);
2090 if (err) {
2091 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2092 goto err_out;
2093 }
2094
2095 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2096
2097 /* check if lenient mode is supported and enabled */
2098 if (ice_fw_supports_link_override(pi->hw) &&
2099 !(pcaps->module_compliance_enforcement &
2100 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2101 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2102
2103 /* if the FW supports default PHY configuration mode, then the driver
2104 * does not have to apply link override settings. If not,
2105 * initialize user PHY configuration with link override values
2106 */
2107 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2108 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2109 ice_init_phy_cfg_dflt_override(pi);
2110 goto out;
2111 }
2112 }
2113
2114 /* if link default override is not enabled, set user flow control and
2115 * FEC settings based on what get_phy_caps returned
2116 */
2117 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2118 pcaps->link_fec_options);
2119 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2120
2121 out:
2122 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2123 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2124 err_out:
2125 kfree(pcaps);
2126 return err;
2127 }
2128
2129 /**
2130 * ice_configure_phy - configure PHY
2131 * @vsi: VSI of PHY
2132 *
2133 * Set the PHY configuration. If the current PHY configuration is the same as
2134 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2135 * configure the PHY based on the get PHY capabilities topology with media.
2136 */
2137 static int ice_configure_phy(struct ice_vsi *vsi)
2138 {
2139 struct device *dev = ice_pf_to_dev(vsi->back);
2140 struct ice_port_info *pi = vsi->port_info;
2141 struct ice_aqc_get_phy_caps_data *pcaps;
2142 struct ice_aqc_set_phy_cfg_data *cfg;
2143 struct ice_phy_info *phy = &pi->phy;
2144 struct ice_pf *pf = vsi->back;
2145 int err;
2146
2147 /* Ensure we have media as we cannot configure a medialess port */
2148 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2149 return -ENOMEDIUM;
2150
2151 ice_print_topo_conflict(vsi);
2152
2153 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2154 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2155 return -EPERM;
2156
2157 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2158 return ice_force_phys_link_state(vsi, true);
2159
2160 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2161 if (!pcaps)
2162 return -ENOMEM;
2163
2164 /* Get current PHY config */
2165 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2166 NULL);
2167 if (err) {
2168 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2169 vsi->vsi_num, err);
2170 goto done;
2171 }
2172
2173 /* If PHY enable link is configured and configuration has not changed,
2174 * there's nothing to do
2175 */
2176 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2177 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2178 goto done;
2179
2180 /* Use PHY topology as baseline for configuration */
2181 memset(pcaps, 0, sizeof(*pcaps));
2182 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2183 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2184 pcaps, NULL);
2185 else
2186 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2187 pcaps, NULL);
2188 if (err) {
2189 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2190 vsi->vsi_num, err);
2191 goto done;
2192 }
2193
2194 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2195 if (!cfg) {
2196 err = -ENOMEM;
2197 goto done;
2198 }
2199
2200 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2201
2202 /* Speed - If default override pending, use curr_user_phy_cfg set in
2203 * ice_init_phy_cfg_dflt_override().
2204 */
2205 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2206 vsi->back->state)) {
2207 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2208 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2209 } else {
2210 u64 phy_low = 0, phy_high = 0;
2211
2212 ice_update_phy_type(&phy_low, &phy_high,
2213 pi->phy.curr_user_speed_req);
2214 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2215 cfg->phy_type_high = pcaps->phy_type_high &
2216 cpu_to_le64(phy_high);
2217 }
2218
2219 /* Can't provide what was requested; use PHY capabilities */
2220 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2221 cfg->phy_type_low = pcaps->phy_type_low;
2222 cfg->phy_type_high = pcaps->phy_type_high;
2223 }
2224
2225 /* FEC */
2226 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2227
2228 /* Can't provide what was requested; use PHY capabilities */
2229 if (cfg->link_fec_opt !=
2230 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2231 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2232 cfg->link_fec_opt = pcaps->link_fec_options;
2233 }
2234
2235 /* Flow Control - always supported; no need to check against
2236 * capabilities
2237 */
2238 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2239
2240 /* Enable link and link update */
2241 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2242
2243 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2244 if (err)
2245 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2246 vsi->vsi_num, err);
2247
2248 kfree(cfg);
2249 done:
2250 kfree(pcaps);
2251 return err;
2252 }
2253
2254 /**
2255 * ice_check_media_subtask - Check for media
2256 * @pf: pointer to PF struct
2257 *
2258 * If media is available, initialize the PHY user configuration if it has not
2259 * been initialized already, and configure the PHY if the interface is up.
2260 */
2261 static void ice_check_media_subtask(struct ice_pf *pf)
2262 {
2263 struct ice_port_info *pi;
2264 struct ice_vsi *vsi;
2265 int err;
2266
2267 /* No need to check for media if it's already present */
2268 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2269 return;
2270
2271 vsi = ice_get_main_vsi(pf);
2272 if (!vsi)
2273 return;
2274
2275 /* Refresh link info and check if media is present */
2276 pi = vsi->port_info;
2277 err = ice_update_link_info(pi);
2278 if (err)
2279 return;
2280
2281 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2282
2283 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2284 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2285 ice_init_phy_user_cfg(pi);
2286
2287 /* PHY settings are reset on media insertion, reconfigure
2288 * PHY to preserve settings.
2289 */
2290 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2291 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2292 return;
2293
2294 err = ice_configure_phy(vsi);
2295 if (!err)
2296 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2297
2298 /* A Link Status Event will be generated; the event handler
2299 * will complete bringing the interface up
2300 */
2301 }
2302 }
2303
2304 /**
2305 * ice_service_task - manage and run subtasks
2306 * @work: pointer to work_struct contained by the PF struct
2307 */
2308 static void ice_service_task(struct work_struct *work)
2309 {
2310 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2311 unsigned long start_time = jiffies;
2312
2313 /* subtasks */
2314
2315 /* process reset requests first */
2316 ice_reset_subtask(pf);
2317
2318 /* bail if a reset/recovery cycle is pending or rebuild failed */
2319 if (ice_is_reset_in_progress(pf->state) ||
2320 test_bit(ICE_SUSPENDED, pf->state) ||
2321 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2322 ice_service_task_complete(pf);
2323 return;
2324 }
2325
2326 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2327 struct iidc_event *event;
2328
2329 event = kzalloc(sizeof(*event), GFP_KERNEL);
2330 if (event) {
2331 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2332 /* report the entire OICR value to AUX driver */
2333 swap(event->reg, pf->oicr_err_reg);
2334 ice_send_event_to_aux(pf, event);
2335 kfree(event);
2336 }
2337 }
2338
2339 /* Unplug the aux dev per request; if an unplug request came in
2340 * while a plug request was being processed, this will handle it
2341 */
2342 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2343 ice_unplug_aux_dev(pf);
2344
2345 /* Plug aux device per request */
2346 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2347 ice_plug_aux_dev(pf);
2348
2349 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2350 struct iidc_event *event;
2351
2352 event = kzalloc(sizeof(*event), GFP_KERNEL);
2353 if (event) {
2354 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2355 ice_send_event_to_aux(pf, event);
2356 kfree(event);
2357 }
2358 }
2359
2360 ice_clean_adminq_subtask(pf);
2361 ice_check_media_subtask(pf);
2362 ice_check_for_hang_subtask(pf);
2363 ice_sync_fltr_subtask(pf);
2364 ice_handle_mdd_event(pf);
2365 ice_watchdog_subtask(pf);
2366
2367 if (ice_is_safe_mode(pf)) {
2368 ice_service_task_complete(pf);
2369 return;
2370 }
2371
2372 ice_process_vflr_event(pf);
2373 ice_clean_mailboxq_subtask(pf);
2374 ice_clean_sbq_subtask(pf);
2375 ice_sync_arfs_fltrs(pf);
2376 ice_flush_fdir_ctx(pf);
2377
2378 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2379 ice_service_task_complete(pf);
2380
2381 /* If the tasks have taken longer than one service timer period
2382 * or there is more work to be done, reset the service timer to
2383 * schedule the service task now.
2384 */
2385 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2386 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2387 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2388 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2389 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2390 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2391 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2392 mod_timer(&pf->serv_tmr, jiffies);
2393 }
2394
2395 /**
2396 * ice_set_ctrlq_len - helper function to set controlq length
2397 * @hw: pointer to the HW instance
2398 */
2399 static void ice_set_ctrlq_len(struct ice_hw *hw)
2400 {
2401 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2402 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2403 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2404 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2405 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2406 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2407 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2408 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2409 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2410 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2411 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2412 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2413 }
2414
2415 /**
2416 * ice_schedule_reset - schedule a reset
2417 * @pf: board private structure
2418 * @reset: reset being requested
2419 */
2420 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2421 {
2422 struct device *dev = ice_pf_to_dev(pf);
2423
2424 /* bail out if earlier reset has failed */
2425 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2426 dev_dbg(dev, "earlier reset has failed\n");
2427 return -EIO;
2428 }
2429 /* bail if reset/recovery already in progress */
2430 if (ice_is_reset_in_progress(pf->state)) {
2431 dev_dbg(dev, "Reset already in progress\n");
2432 return -EBUSY;
2433 }
2434
2435 switch (reset) {
2436 case ICE_RESET_PFR:
2437 set_bit(ICE_PFR_REQ, pf->state);
2438 break;
2439 case ICE_RESET_CORER:
2440 set_bit(ICE_CORER_REQ, pf->state);
2441 break;
2442 case ICE_RESET_GLOBR:
2443 set_bit(ICE_GLOBR_REQ, pf->state);
2444 break;
2445 default:
2446 return -EINVAL;
2447 }
2448
2449 ice_service_task_schedule(pf);
2450 return 0;
2451 }
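/* Caller sketch (illustrative): request an asynchronous PF reset and let
 * the service task carry it out:
 *
 *	err = ice_schedule_reset(pf, ICE_RESET_PFR);
 *	if (err == -EBUSY)
 *		return;	-- a reset is already in progress
 */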
2452
2453 /**
2454 * ice_irq_affinity_notify - Callback for affinity changes
2455 * @notify: context as to what irq was changed
2456 * @mask: the new affinity mask
2457 *
2458 * This is a callback function used by the irq_set_affinity_notifier function
2459 * so that we may register to receive changes to the irq affinity masks.
2460 */
2461 static void
2462 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2463 const cpumask_t *mask)
2464 {
2465 struct ice_q_vector *q_vector =
2466 container_of(notify, struct ice_q_vector, affinity_notify);
2467
2468 cpumask_copy(&q_vector->affinity_mask, mask);
2469 }
2470
2471 /**
2472 * ice_irq_affinity_release - Callback for affinity notifier release
2473 * @ref: internal core kernel usage
2474 *
2475 * This is a callback function used by the irq_set_affinity_notifier function
2476 * to inform the current notification subscriber that they will no longer
2477 * receive notifications.
2478 */
2479 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2480
2481 /**
2482 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2483 * @vsi: the VSI being configured
2484 */
2485 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2486 {
2487 struct ice_hw *hw = &vsi->back->hw;
2488 int i;
2489
2490 ice_for_each_q_vector(vsi, i)
2491 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2492
2493 ice_flush(hw);
2494 return 0;
2495 }
2496
2497 /**
2498 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2499 * @vsi: the VSI being configured
2500 * @basename: name for the vector
2501 */
2502 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2503 {
2504 int q_vectors = vsi->num_q_vectors;
2505 struct ice_pf *pf = vsi->back;
2506 struct device *dev;
2507 int rx_int_idx = 0;
2508 int tx_int_idx = 0;
2509 int vector, err;
2510 int irq_num;
2511
2512 dev = ice_pf_to_dev(pf);
2513 for (vector = 0; vector < q_vectors; vector++) {
2514 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2515
2516 irq_num = q_vector->irq.virq;
2517
2518 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2519 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2520 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2521 tx_int_idx++;
2522 } else if (q_vector->rx.rx_ring) {
2523 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2524 "%s-%s-%d", basename, "rx", rx_int_idx++);
2525 } else if (q_vector->tx.tx_ring) {
2526 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2527 "%s-%s-%d", basename, "tx", tx_int_idx++);
2528 } else {
2529 /* skip this unused q_vector */
2530 continue;
2531 }
2532 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2533 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2534 IRQF_SHARED, q_vector->name,
2535 q_vector);
2536 else
2537 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2538 0, q_vector->name, q_vector);
2539 if (err) {
2540 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2541 err);
2542 goto free_q_irqs;
2543 }
2544
2545 /* register for affinity change notifications */
2546 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2547 struct irq_affinity_notify *affinity_notify;
2548
2549 affinity_notify = &q_vector->affinity_notify;
2550 affinity_notify->notify = ice_irq_affinity_notify;
2551 affinity_notify->release = ice_irq_affinity_release;
2552 irq_set_affinity_notifier(irq_num, affinity_notify);
2553 }
2554
2555 /* assign the mask for this irq */
2556 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2557 }
2558
2559 err = ice_set_cpu_rx_rmap(vsi);
2560 if (err) {
2561 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2562 vsi->vsi_num, ERR_PTR(err));
2563 goto free_q_irqs;
2564 }
2565
2566 vsi->irqs_ready = true;
2567 return 0;
2568
2569 free_q_irqs:
2570 while (vector--) {
2571 irq_num = vsi->q_vectors[vector]->irq.virq;
2572 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2573 irq_set_affinity_notifier(irq_num, NULL);
2574 irq_set_affinity_hint(irq_num, NULL);
2575 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2576 }
2577 return err;
2578 }
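/* With a basename such as "ice-eth0" (driver string plus netdev name, as
 * callers typically build it), the vectors requested above appear in
 * /proc/interrupts as, for example:
 *
 *	ice-eth0-TxRx-0
 *	ice-eth0-TxRx-1
 */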
2579
2580 /**
2581 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2582 * @vsi: VSI to setup Tx rings used by XDP
2583 *
2584 * Return 0 on success and negative value on error
2585 */
2586 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2587 {
2588 struct device *dev = ice_pf_to_dev(vsi->back);
2589 struct ice_tx_desc *tx_desc;
2590 int i, j;
2591
2592 ice_for_each_xdp_txq(vsi, i) {
2593 u16 xdp_q_idx = vsi->alloc_txq + i;
2594 struct ice_ring_stats *ring_stats;
2595 struct ice_tx_ring *xdp_ring;
2596
2597 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2598 if (!xdp_ring)
2599 goto free_xdp_rings;
2600
2601 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2602 if (!ring_stats) {
2603 ice_free_tx_ring(xdp_ring);
2604 goto free_xdp_rings;
2605 }
2606
2607 xdp_ring->ring_stats = ring_stats;
2608 xdp_ring->q_index = xdp_q_idx;
2609 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2610 xdp_ring->vsi = vsi;
2611 xdp_ring->netdev = NULL;
2612 xdp_ring->dev = dev;
2613 xdp_ring->count = vsi->num_tx_desc;
2614 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2615 if (ice_setup_tx_ring(xdp_ring))
2616 goto free_xdp_rings;
2617 ice_set_ring_xdp(xdp_ring);
2618 spin_lock_init(&xdp_ring->tx_lock);
2619 for (j = 0; j < xdp_ring->count; j++) {
2620 tx_desc = ICE_TX_DESC(xdp_ring, j);
2621 tx_desc->cmd_type_offset_bsz = 0;
2622 }
2623 }
2624
2625 return 0;
2626
2627 free_xdp_rings:
2628 for (; i >= 0; i--) {
2629 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2630 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2631 vsi->xdp_rings[i]->ring_stats = NULL;
2632 ice_free_tx_ring(vsi->xdp_rings[i]);
2633 }
2634 }
2635 return -ENOMEM;
2636 }
2637
2638 /**
2639 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2640 * @vsi: VSI to set the bpf prog on
2641 * @prog: the bpf prog pointer
2642 */
2643 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2644 {
2645 struct bpf_prog *old_prog;
2646 int i;
2647
2648 old_prog = xchg(&vsi->xdp_prog, prog);
2649 ice_for_each_rxq(vsi, i)
2650 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2651
2652 if (old_prog)
2653 bpf_prog_put(old_prog);
2654 }
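/* Attach and detach both funnel through this helper; a NULL prog detaches
 * and drops the reference on the old program (a minimal sketch):
 *
 *	ice_vsi_assign_bpf_prog(vsi, prog);	-- attach or hot-swap
 *	ice_vsi_assign_bpf_prog(vsi, NULL);	-- detach, puts the old prog
 */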
2655
2656 /**
2657 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2658 * @vsi: VSI to bring up Tx rings used by XDP
2659 * @prog: bpf program that will be assigned to VSI
2660 * @cfg_type: create from scratch or restore the existing configuration
2661 *
2662 * Return 0 on success and negative value on error
2663 */
2664 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2665 enum ice_xdp_cfg cfg_type)
2666 {
2667 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2668 int xdp_rings_rem = vsi->num_xdp_txq;
2669 struct ice_pf *pf = vsi->back;
2670 struct ice_qs_cfg xdp_qs_cfg = {
2671 .qs_mutex = &pf->avail_q_mutex,
2672 .pf_map = pf->avail_txqs,
2673 .pf_map_size = pf->max_pf_txqs,
2674 .q_count = vsi->num_xdp_txq,
2675 .scatter_count = ICE_MAX_SCATTER_TXQS,
2676 .vsi_map = vsi->txq_map,
2677 .vsi_map_offset = vsi->alloc_txq,
2678 .mapping_mode = ICE_VSI_MAP_CONTIG
2679 };
2680 struct device *dev;
2681 int i, v_idx;
2682 int status;
2683
2684 dev = ice_pf_to_dev(pf);
2685 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2686 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2687 if (!vsi->xdp_rings)
2688 return -ENOMEM;
2689
2690 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2691 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2692 goto err_map_xdp;
2693
2694 if (static_key_enabled(&ice_xdp_locking_key))
2695 netdev_warn(vsi->netdev,
2696 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2697
2698 if (ice_xdp_alloc_setup_rings(vsi))
2699 goto clear_xdp_rings;
2700
2701 /* follow the logic from ice_vsi_map_rings_to_vectors */
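/* Distribution example (illustrative numbers): with num_xdp_txq = 10 and
 * 4 q_vectors, the DIV_ROUND_UP below hands out 3, 3, 2 and 2 XDP rings
 * to successive vectors, so earlier vectors absorb the remainder.
 */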
2702 ice_for_each_q_vector(vsi, v_idx) {
2703 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2704 int xdp_rings_per_v, q_id, q_base;
2705
2706 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2707 vsi->num_q_vectors - v_idx);
2708 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2709
2710 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2711 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2712
2713 xdp_ring->q_vector = q_vector;
2714 xdp_ring->next = q_vector->tx.tx_ring;
2715 q_vector->tx.tx_ring = xdp_ring;
2716 }
2717 xdp_rings_rem -= xdp_rings_per_v;
2718 }
2719
2720 ice_for_each_rxq(vsi, i) {
2721 if (static_key_enabled(&ice_xdp_locking_key)) {
2722 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2723 } else {
2724 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2725 struct ice_tx_ring *ring;
2726
2727 ice_for_each_tx_ring(ring, q_vector->tx) {
2728 if (ice_ring_is_xdp(ring)) {
2729 vsi->rx_rings[i]->xdp_ring = ring;
2730 break;
2731 }
2732 }
2733 }
2734 ice_tx_xsk_pool(vsi, i);
2735 }
2736
2737 /* omit the scheduler update if in reset path; XDP queues will be
2738 * taken into account at the end of ice_vsi_rebuild, where
2739 * ice_cfg_vsi_lan is being called
2740 */
2741 if (cfg_type == ICE_XDP_CFG_PART)
2742 return 0;
2743
2744 /* tell the Tx scheduler that right now we have
2745 * additional queues
2746 */
2747 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2748 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2749
2750 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2751 max_txqs);
2752 if (status) {
2753 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2754 status);
2755 goto clear_xdp_rings;
2756 }
2757
2758 /* assign the prog only when it's not already present on VSI;
2759 * this flow is reached by both the ethtool -L and ndo_bpf flows;
2760 * VSI rebuild that happens under ethtool -L can expose us to
2761 * the bpf_prog refcount issues as we would be swapping same
2762 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2763 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2764 * this is not harmful as dev_xdp_install bumps the refcount
2765 * before calling the op exposed by the driver;
2766 */
2767 if (!ice_is_xdp_ena_vsi(vsi))
2768 ice_vsi_assign_bpf_prog(vsi, prog);
2769
2770 return 0;
2771 clear_xdp_rings:
2772 ice_for_each_xdp_txq(vsi, i)
2773 if (vsi->xdp_rings[i]) {
2774 kfree_rcu(vsi->xdp_rings[i], rcu);
2775 vsi->xdp_rings[i] = NULL;
2776 }
2777
2778 err_map_xdp:
2779 mutex_lock(&pf->avail_q_mutex);
2780 ice_for_each_xdp_txq(vsi, i) {
2781 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2782 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2783 }
2784 mutex_unlock(&pf->avail_q_mutex);
2785
2786 devm_kfree(dev, vsi->xdp_rings);
2787 return -ENOMEM;
2788 }
2789
2790 /**
2791 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2792 * @vsi: VSI to remove XDP rings
2793 * @cfg_type: disable XDP permanently or allow it to be restored later
2794 *
2795 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2796 * resources
2797 */
2798 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2799 {
2800 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2801 struct ice_pf *pf = vsi->back;
2802 int i, v_idx;
2803
2804 /* q_vectors are freed in reset path so there's no point in detaching
2805 * rings
2806 */
2807 if (cfg_type == ICE_XDP_CFG_PART)
2808 goto free_qmap;
2809
2810 ice_for_each_q_vector(vsi, v_idx) {
2811 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2812 struct ice_tx_ring *ring;
2813
2814 ice_for_each_tx_ring(ring, q_vector->tx)
2815 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2816 break;
2817
2818 /* restore the value of last node prior to XDP setup */
2819 q_vector->tx.tx_ring = ring;
2820 }
2821
2822 free_qmap:
2823 mutex_lock(&pf->avail_q_mutex);
2824 ice_for_each_xdp_txq(vsi, i) {
2825 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2826 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2827 }
2828 mutex_unlock(&pf->avail_q_mutex);
2829
2830 ice_for_each_xdp_txq(vsi, i)
2831 if (vsi->xdp_rings[i]) {
2832 if (vsi->xdp_rings[i]->desc) {
2833 synchronize_rcu();
2834 ice_free_tx_ring(vsi->xdp_rings[i]);
2835 }
2836 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2837 vsi->xdp_rings[i]->ring_stats = NULL;
2838 kfree_rcu(vsi->xdp_rings[i], rcu);
2839 vsi->xdp_rings[i] = NULL;
2840 }
2841
2842 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2843 vsi->xdp_rings = NULL;
2844
2845 if (static_key_enabled(&ice_xdp_locking_key))
2846 static_branch_dec(&ice_xdp_locking_key);
2847
2848 if (cfg_type == ICE_XDP_CFG_PART)
2849 return 0;
2850
2851 ice_vsi_assign_bpf_prog(vsi, NULL);
2852
2853 /* notify Tx scheduler that we destroyed XDP queues and bring
2854 * back the old number of child nodes
2855 */
2856 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2857 max_txqs[i] = vsi->num_txq;
2858
2859 /* change number of XDP Tx queues to 0 */
2860 vsi->num_xdp_txq = 0;
2861
2862 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2863 max_txqs);
2864 }
2865
2866 /**
2867 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2868 * @vsi: VSI to schedule napi on
2869 */
2870 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2871 {
2872 int i;
2873
2874 ice_for_each_rxq(vsi, i) {
2875 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2876
2877 if (rx_ring->xsk_pool)
2878 napi_schedule(&rx_ring->q_vector->napi);
2879 }
2880 }
2881
2882 /**
2883 * ice_vsi_determine_xdp_res - figure out how many XDP Tx queues the VSI can have
2884 * @vsi: VSI to determine the count of XDP Tx qs
2885 *
2886 * Returns 0 if the number of available Tx queues is at least half the
2887 * number of possible CPUs, -ENOMEM otherwise
2888 */
2889 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2890 {
2891 u16 avail = ice_get_avail_txq_count(vsi->back);
2892 u16 cpus = num_possible_cpus();
2893
2894 if (avail < cpus / 2)
2895 return -ENOMEM;
2896
2897 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2898
2899 if (vsi->num_xdp_txq < cpus)
2900 static_branch_inc(&ice_xdp_locking_key);
2901
2902 return 0;
2903 }
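/* Worked example with hypothetical counts: with 16 possible CPUs and 12
 * available Tx queues, 12 >= 16 / 2 so the check passes, num_xdp_txq
 * becomes min(12, 16) = 12, and because 12 < 16 the ice_xdp_locking_key
 * static branch is enabled so XDP rings may be shared across CPUs.
 */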
2904
2905 /**
2906 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2907 * @vsi: Pointer to VSI structure
2908 */
2909 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2910 {
2911 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2912 return ICE_RXBUF_1664;
2913 else
2914 return ICE_RXBUF_3072;
2915 }
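/* ice_xdp_setup_prog() below compares netdev->mtu + ICE_ETH_PKT_HDR_PAD
 * against this limit for programs without frags support: a 1500 byte MTU
 * fits either buffer size, while e.g. a 9000 byte jumbo MTU exceeds even
 * the 3072 byte buffer and is rejected.
 */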
2916
2917 /**
2918 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2919 * @vsi: VSI to setup XDP for
2920 * @prog: XDP program
2921 * @extack: netlink extended ack
2922 */
2923 static int
2924 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2925 struct netlink_ext_ack *extack)
2926 {
2927 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2928 bool if_running = netif_running(vsi->netdev);
2929 int ret = 0, xdp_ring_err = 0;
2930
2931 if (prog && !prog->aux->xdp_has_frags) {
2932 if (frame_size > ice_max_xdp_frame_size(vsi)) {
2933 NL_SET_ERR_MSG_MOD(extack,
2934 "MTU is too large for linear frames and XDP prog does not support frags");
2935 return -EOPNOTSUPP;
2936 }
2937 }
2938
2939 /* hot swap progs and avoid toggling link */
2940 if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
2941 ice_vsi_assign_bpf_prog(vsi, prog);
2942 return 0;
2943 }
2944
2945 /* need to stop netdev while setting up the program for Rx rings */
2946 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2947 ret = ice_down(vsi);
2948 if (ret) {
2949 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2950 return ret;
2951 }
2952 }
2953
2954 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2955 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2956 if (xdp_ring_err) {
2957 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2958 } else {
2959 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
2960 ICE_XDP_CFG_FULL);
2961 if (xdp_ring_err)
2962 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2963 }
2964 xdp_features_set_redirect_target(vsi->netdev, true);
2965 /* reallocate Rx queues that are used for zero-copy */
2966 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2967 if (xdp_ring_err)
2968 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2969 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2970 xdp_features_clear_redirect_target(vsi->netdev);
2971 xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
2972 if (xdp_ring_err)
2973 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2974 /* reallocate Rx queues that were used for zero-copy */
2975 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2976 if (xdp_ring_err)
2977 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2978 }
2979
2980 if (if_running)
2981 ret = ice_up(vsi);
2982
2983 if (!ret && prog)
2984 ice_vsi_rx_napi_schedule(vsi);
2985
2986 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2987 }
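/* Userspace attach sketch (hypothetical device and object names), which
 * reaches this function via ndo_bpf (ice_xdp() below) with
 * XDP_SETUP_PROG:
 *
 *	ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 */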
2988
2989 /**
2990 * ice_xdp_safe_mode - XDP handler for safe mode
2991 * @dev: netdevice
2992 * @xdp: XDP command
2993 */
2994 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2995 struct netdev_bpf *xdp)
2996 {
2997 NL_SET_ERR_MSG_MOD(xdp->extack,
2998 "Please provide working DDP firmware package in order to use XDP\n"
2999 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3000 return -EOPNOTSUPP;
3001 }
3002
3003 /**
3004 * ice_xdp - implements XDP handler
3005 * @dev: netdevice
3006 * @xdp: XDP command
3007 */
3008 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3009 {
3010 struct ice_netdev_priv *np = netdev_priv(dev);
3011 struct ice_vsi *vsi = np->vsi;
3012
3013 if (vsi->type != ICE_VSI_PF) {
3014 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3015 return -EINVAL;
3016 }
3017
3018 switch (xdp->command) {
3019 case XDP_SETUP_PROG:
3020 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3021 case XDP_SETUP_XSK_POOL:
3022 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3023 xdp->xsk.queue_id);
3024 default:
3025 return -EINVAL;
3026 }
3027 }
3028
3029 /**
3030 * ice_ena_misc_vector - enable the non-queue interrupts
3031 * @pf: board private structure
3032 */
3033 static void ice_ena_misc_vector(struct ice_pf *pf)
3034 {
3035 struct ice_hw *hw = &pf->hw;
3036 u32 val;
3037
3038 /* Disable anti-spoof detection interrupt to prevent spurious event
3039 * interrupts during a function reset. Anti-spoof functionality is
3040 * still supported.
3041 */
3042 val = rd32(hw, GL_MDCK_TX_TDPU);
3043 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3044 wr32(hw, GL_MDCK_TX_TDPU, val);
3045
3046 /* clear things first */
3047 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3048 rd32(hw, PFINT_OICR); /* read to clear */
3049
3050 val = (PFINT_OICR_ECC_ERR_M |
3051 PFINT_OICR_MAL_DETECT_M |
3052 PFINT_OICR_GRST_M |
3053 PFINT_OICR_PCI_EXCEPTION_M |
3054 PFINT_OICR_VFLR_M |
3055 PFINT_OICR_HMC_ERR_M |
3056 PFINT_OICR_PE_PUSH_M |
3057 PFINT_OICR_PE_CRITERR_M);
3058
3059 wr32(hw, PFINT_OICR_ENA, val);
3060
3061 /* SW_ITR_IDX = 0, but don't change INTENA */
3062 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3063 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3064 }
3065
3066 /**
3067 * ice_misc_intr - misc interrupt handler
3068 * @irq: interrupt number
3069 * @data: pointer to the PF structure
3070 */
3071 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3072 {
3073 struct ice_pf *pf = (struct ice_pf *)data;
3074 struct ice_hw *hw = &pf->hw;
3075 struct device *dev;
3076 u32 oicr, ena_mask;
3077
3078 dev = ice_pf_to_dev(pf);
3079 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3080 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3081 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3082
3083 oicr = rd32(hw, PFINT_OICR);
3084 ena_mask = rd32(hw, PFINT_OICR_ENA);
3085
3086 if (oicr & PFINT_OICR_SWINT_M) {
3087 ena_mask &= ~PFINT_OICR_SWINT_M;
3088 pf->sw_int_count++;
3089 }
3090
3091 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3092 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3093 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3094 }
3095 if (oicr & PFINT_OICR_VFLR_M) {
3096 /* disable any further VFLR event notifications */
3097 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3098 u32 reg = rd32(hw, PFINT_OICR_ENA);
3099
3100 reg &= ~PFINT_OICR_VFLR_M;
3101 wr32(hw, PFINT_OICR_ENA, reg);
3102 } else {
3103 ena_mask &= ~PFINT_OICR_VFLR_M;
3104 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3105 }
3106 }
3107
3108 if (oicr & PFINT_OICR_GRST_M) {
3109 u32 reset;
3110
3111 /* we have a reset warning */
3112 ena_mask &= ~PFINT_OICR_GRST_M;
3113 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3114 GLGEN_RSTAT_RESET_TYPE_S;
3115
3116 if (reset == ICE_RESET_CORER)
3117 pf->corer_count++;
3118 else if (reset == ICE_RESET_GLOBR)
3119 pf->globr_count++;
3120 else if (reset == ICE_RESET_EMPR)
3121 pf->empr_count++;
3122 else
3123 dev_dbg(dev, "Invalid reset type %d\n", reset);
3124
3125 /* If a reset cycle isn't already in progress, we set a bit in
3126 * pf->state so that the service task can start a reset/rebuild.
3127 */
3128 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3129 if (reset == ICE_RESET_CORER)
3130 set_bit(ICE_CORER_RECV, pf->state);
3131 else if (reset == ICE_RESET_GLOBR)
3132 set_bit(ICE_GLOBR_RECV, pf->state);
3133 else
3134 set_bit(ICE_EMPR_RECV, pf->state);
3135
3136 /* There are a couple of different bits at play here.
3137 * hw->reset_ongoing indicates whether the hardware is
3138 * in reset. This is set to true when a reset interrupt
3139 * is received and set back to false after the driver
3140 * has determined that the hardware is out of reset.
3141 *
3142 * ICE_RESET_OICR_RECV in pf->state indicates
3143 * that a post reset rebuild is required before the
3144 * driver is operational again. This is set above.
3145 *
3146 * As this is the start of the reset/rebuild cycle, set
3147 * both to indicate that.
3148 */
3149 hw->reset_ongoing = true;
3150 }
3151 }
3152
3153 if (oicr & PFINT_OICR_TSYN_TX_M) {
3154 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3155 if (!hw->reset_ongoing)
3156 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3157 }
3158
3159 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3160 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3161 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3162
3163 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3164
3165 if (hw->func_caps.ts_func_info.src_tmr_owned) {
3166 /* Save EVENTs from GLTSYN register */
3167 pf->ptp.ext_ts_irq |= gltsyn_stat &
3168 (GLTSYN_STAT_EVENT0_M |
3169 GLTSYN_STAT_EVENT1_M |
3170 GLTSYN_STAT_EVENT2_M);
3171
3172 set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
3173 }
3174 }
3175
3176 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3177 if (oicr & ICE_AUX_CRIT_ERR) {
3178 pf->oicr_err_reg |= oicr;
3179 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3180 ena_mask &= ~ICE_AUX_CRIT_ERR;
3181 }
3182
3183 /* Report any remaining unexpected interrupts */
3184 oicr &= ena_mask;
3185 if (oicr) {
3186 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3187 /* If a critical error is pending there is no choice but to
3188 * reset the device.
3189 */
3190 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3191 PFINT_OICR_ECC_ERR_M)) {
3192 set_bit(ICE_PFR_REQ, pf->state);
3193 }
3194 }
3195
3196 return IRQ_WAKE_THREAD;
3197 }
3198
3199 /**
3200 * ice_misc_intr_thread_fn - misc interrupt thread function
3201 * @irq: interrupt number
3202 * @data: pointer to a q_vector
3203 */
3204 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3205 {
3206 struct ice_pf *pf = data;
3207 struct ice_hw *hw;
3208
3209 hw = &pf->hw;
3210
3211 if (ice_is_reset_in_progress(pf->state))
3212 return IRQ_HANDLED;
3213
3214 ice_service_task_schedule(pf);
3215
3216 if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
3217 ice_ptp_extts_event(pf);
3218
3219 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3220 /* Process outstanding Tx timestamps. If there is more work,
3221 * re-arm the interrupt to trigger again.
3222 */
3223 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3224 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3225 ice_flush(hw);
3226 }
3227 }
3228
3229 ice_irq_dynamic_ena(hw, NULL, NULL);
3230
3231 return IRQ_HANDLED;
3232 }
3233
3234 /**
3235 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3236 * @hw: pointer to HW structure
3237 */
3238 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3239 {
3240 /* disable Admin queue Interrupt causes */
3241 wr32(hw, PFINT_FW_CTL,
3242 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3243
3244 /* disable Mailbox queue Interrupt causes */
3245 wr32(hw, PFINT_MBX_CTL,
3246 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3247
3248 wr32(hw, PFINT_SB_CTL,
3249 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3250
3251 /* disable Control queue Interrupt causes */
3252 wr32(hw, PFINT_OICR_CTL,
3253 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3254
3255 ice_flush(hw);
3256 }
3257
3258 /**
3259 * ice_free_irq_msix_misc - Unroll misc vector setup
3260 * @pf: board private structure
3261 */
3262 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3263 {
3264 int misc_irq_num = pf->oicr_irq.virq;
3265 struct ice_hw *hw = &pf->hw;
3266
3267 ice_dis_ctrlq_interrupts(hw);
3268
3269 /* disable OICR interrupt */
3270 wr32(hw, PFINT_OICR_ENA, 0);
3271 ice_flush(hw);
3272
3273 synchronize_irq(misc_irq_num);
3274 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3275
3276 ice_free_irq(pf, pf->oicr_irq);
3277 }
3278
3279 /**
3280 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3281 * @hw: pointer to HW structure
3282 * @reg_idx: HW vector index to associate the control queue interrupts with
3283 */
3284 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3285 {
3286 u32 val;
3287
3288 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3289 PFINT_OICR_CTL_CAUSE_ENA_M);
3290 wr32(hw, PFINT_OICR_CTL, val);
3291
3292 /* enable Admin queue Interrupt causes */
3293 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3294 PFINT_FW_CTL_CAUSE_ENA_M);
3295 wr32(hw, PFINT_FW_CTL, val);
3296
3297 /* enable Mailbox queue Interrupt causes */
3298 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3299 PFINT_MBX_CTL_CAUSE_ENA_M);
3300 wr32(hw, PFINT_MBX_CTL, val);
3301
3302 /* This enables Sideband queue Interrupt causes */
3303 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3304 PFINT_SB_CTL_CAUSE_ENA_M);
3305 wr32(hw, PFINT_SB_CTL, val);
3306
3307 ice_flush(hw);
3308 }
3309
3310 /**
3311 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3312 * @pf: board private structure
3313 *
3314 * This sets up the handler for MSIX 0, which is used to manage the
3315 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3316 * when in MSI or Legacy interrupt mode.
3317 */
3318 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3319 {
3320 struct device *dev = ice_pf_to_dev(pf);
3321 struct ice_hw *hw = &pf->hw;
3322 struct msi_map oicr_irq;
3323 int err = 0;
3324
3325 if (!pf->int_name[0])
3326 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3327 dev_driver_string(dev), dev_name(dev));
3328
3329 /* Do not request IRQ but do enable OICR interrupt since settings are
3330 * lost during reset. Note that this function is called only during
3331 * rebuild path and not while reset is in progress.
3332 */
3333 if (ice_is_reset_in_progress(pf->state))
3334 goto skip_req_irq;
3335
3336 /* reserve one vector in irq_tracker for misc interrupts */
3337 oicr_irq = ice_alloc_irq(pf, false);
3338 if (oicr_irq.index < 0)
3339 return oicr_irq.index;
3340
3341 pf->oicr_irq = oicr_irq;
3342 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3343 ice_misc_intr_thread_fn, 0,
3344 pf->int_name, pf);
3345 if (err) {
3346 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3347 pf->int_name, err);
3348 ice_free_irq(pf, pf->oicr_irq);
3349 return err;
3350 }
3351
3352 skip_req_irq:
3353 ice_ena_misc_vector(pf);
3354
3355 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3356 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3357 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3358
3359 ice_flush(hw);
3360 ice_irq_dynamic_ena(hw, NULL, NULL);
3361
3362 return 0;
3363 }
3364
3365 /**
3366 * ice_napi_add - register NAPI handler for the VSI
3367 * @vsi: VSI for which NAPI handler is to be registered
3368 *
3369 * This function is only called in the driver's load path. Registering the NAPI
3370 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3371 * reset/rebuild, etc.)
3372 */
3373 static void ice_napi_add(struct ice_vsi *vsi)
3374 {
3375 int v_idx;
3376
3377 if (!vsi->netdev)
3378 return;
3379
3380 ice_for_each_q_vector(vsi, v_idx)
3381 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3382 ice_napi_poll);
3383 }
3384
3385 /**
3386 * ice_set_ops - set netdev and ethtool ops for the given netdev
3387 * @vsi: the VSI associated with the new netdev
3388 */
3389 static void ice_set_ops(struct ice_vsi *vsi)
3390 {
3391 struct net_device *netdev = vsi->netdev;
3392 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3393
3394 if (ice_is_safe_mode(pf)) {
3395 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3396 ice_set_ethtool_safe_mode_ops(netdev);
3397 return;
3398 }
3399
3400 netdev->netdev_ops = &ice_netdev_ops;
3401 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3402 ice_set_ethtool_ops(netdev);
3403
3404 if (vsi->type != ICE_VSI_PF)
3405 return;
3406
3407 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3408 NETDEV_XDP_ACT_XSK_ZEROCOPY |
3409 NETDEV_XDP_ACT_RX_SG;
3410 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3411 }
3412
3413 /**
3414 * ice_set_netdev_features - set features for the given netdev
3415 * @netdev: netdev instance
3416 */
3417 static void ice_set_netdev_features(struct net_device *netdev)
3418 {
3419 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3420 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3421 netdev_features_t csumo_features;
3422 netdev_features_t vlano_features;
3423 netdev_features_t dflt_features;
3424 netdev_features_t tso_features;
3425
3426 if (ice_is_safe_mode(pf)) {
3427 /* safe mode */
3428 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3429 netdev->hw_features = netdev->features;
3430 return;
3431 }
3432
3433 dflt_features = NETIF_F_SG |
3434 NETIF_F_HIGHDMA |
3435 NETIF_F_NTUPLE |
3436 NETIF_F_RXHASH;
3437
3438 csumo_features = NETIF_F_RXCSUM |
3439 NETIF_F_IP_CSUM |
3440 NETIF_F_SCTP_CRC |
3441 NETIF_F_IPV6_CSUM;
3442
3443 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3444 NETIF_F_HW_VLAN_CTAG_TX |
3445 NETIF_F_HW_VLAN_CTAG_RX;
3446
3447 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3448 if (is_dvm_ena)
3449 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3450
3451 tso_features = NETIF_F_TSO |
3452 NETIF_F_TSO_ECN |
3453 NETIF_F_TSO6 |
3454 NETIF_F_GSO_GRE |
3455 NETIF_F_GSO_UDP_TUNNEL |
3456 NETIF_F_GSO_GRE_CSUM |
3457 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3458 NETIF_F_GSO_PARTIAL |
3459 NETIF_F_GSO_IPXIP4 |
3460 NETIF_F_GSO_IPXIP6 |
3461 NETIF_F_GSO_UDP_L4;
3462
3463 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3464 NETIF_F_GSO_GRE_CSUM;
3465 /* set features that user can change */
3466 netdev->hw_features = dflt_features | csumo_features |
3467 vlano_features | tso_features;
3468
3469 /* add support for HW_CSUM on packets with MPLS header */
3470 netdev->mpls_features = NETIF_F_HW_CSUM |
3471 NETIF_F_TSO |
3472 NETIF_F_TSO6;
3473
3474 /* enable features */
3475 netdev->features |= netdev->hw_features;
3476
3477 netdev->hw_features |= NETIF_F_HW_TC;
3478 netdev->hw_features |= NETIF_F_LOOPBACK;
3479
3480 /* encap and VLAN devices inherit default, csumo and tso features */
3481 netdev->hw_enc_features |= dflt_features | csumo_features |
3482 tso_features;
3483 netdev->vlan_features |= dflt_features | csumo_features |
3484 tso_features;
3485
3486 /* advertise support but don't enable by default since only one type of
3487 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3488 	 * type is turned on, the other has to be turned off. This is enforced
3489 	 * by the ice_fix_features() ndo callback.
3490 */
3491 if (is_dvm_ena)
3492 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3493 NETIF_F_HW_VLAN_STAG_TX;
3494
3495 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3496 * be changed at runtime
3497 */
3498 netdev->hw_features |= NETIF_F_RXFCS;
3499
3500 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3501 }
3502
3503 /**
3504 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3505 * @lut: Lookup table
3506 * @rss_table_size: Lookup table size
3507  * @rss_size: Number of queues to spread the hash across
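 *
 * For example (illustrative values), rss_table_size = 8 and rss_size = 3
 * fill the LUT as { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading hash buckets
 * round-robin across the first three queues.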
3508 */
3509 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3510 {
3511 u16 i;
3512
3513 for (i = 0; i < rss_table_size; i++)
3514 lut[i] = i % rss_size;
3515 }
3516
3517 /**
3518 * ice_pf_vsi_setup - Set up a PF VSI
3519 * @pf: board private structure
3520 * @pi: pointer to the port_info instance
3521 *
3522 * Returns pointer to the successfully allocated VSI software struct
3523 * on success, otherwise returns NULL on failure.
3524 */
3525 static struct ice_vsi *
3526 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3527 {
3528 struct ice_vsi_cfg_params params = {};
3529
3530 params.type = ICE_VSI_PF;
3531 params.pi = pi;
3532 params.flags = ICE_VSI_FLAG_INIT;
3533
3534 	return ice_vsi_setup(pf, &params);
3535 }
3536
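/**
 * ice_chnl_vsi_setup - Set up a channel VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @ch: pointer to the channel being configured
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */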
3537 static struct ice_vsi *
3538 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3539 struct ice_channel *ch)
3540 {
3541 struct ice_vsi_cfg_params params = {};
3542
3543 params.type = ICE_VSI_CHNL;
3544 params.pi = pi;
3545 params.ch = ch;
3546 params.flags = ICE_VSI_FLAG_INIT;
3547
3548 	return ice_vsi_setup(pf, &params);
3549 }
3550
3551 /**
3552 * ice_ctrl_vsi_setup - Set up a control VSI
3553 * @pf: board private structure
3554 * @pi: pointer to the port_info instance
3555 *
3556 * Returns pointer to the successfully allocated VSI software struct
3557 * on success, otherwise returns NULL on failure.
3558 */
3559 static struct ice_vsi *
3560 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3561 {
3562 struct ice_vsi_cfg_params params = {};
3563
3564 params.type = ICE_VSI_CTRL;
3565 params.pi = pi;
3566 params.flags = ICE_VSI_FLAG_INIT;
3567
3568 	return ice_vsi_setup(pf, &params);
3569 }
3570
3571 /**
3572 * ice_lb_vsi_setup - Set up a loopback VSI
3573 * @pf: board private structure
3574 * @pi: pointer to the port_info instance
3575 *
3576 * Returns pointer to the successfully allocated VSI software struct
3577 * on success, otherwise returns NULL on failure.
3578 */
3579 struct ice_vsi *
3580 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3581 {
3582 struct ice_vsi_cfg_params params = {};
3583
3584 params.type = ICE_VSI_LB;
3585 params.pi = pi;
3586 params.flags = ICE_VSI_FLAG_INIT;
3587
3588 	return ice_vsi_setup(pf, &params);
3589 }
3590
3591 /**
3592 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3593 * @netdev: network interface to be adjusted
3594 * @proto: VLAN TPID
3595 * @vid: VLAN ID to be added
3596 *
3597 * net_device_ops implementation for adding VLAN IDs
3598 */
3599 static int
3600 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3601 {
3602 struct ice_netdev_priv *np = netdev_priv(netdev);
3603 struct ice_vsi_vlan_ops *vlan_ops;
3604 struct ice_vsi *vsi = np->vsi;
3605 struct ice_vlan vlan;
3606 int ret;
3607
3608 /* VLAN 0 is added by default during load/reset */
3609 if (!vid)
3610 return 0;
3611
3612 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3613 usleep_range(1000, 2000);
3614
3615 /* Add multicast promisc rule for the VLAN ID to be added if
3616 * all-multicast is currently enabled.
3617 */
3618 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3619 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3620 ICE_MCAST_VLAN_PROMISC_BITS,
3621 vid);
3622 if (ret)
3623 goto finish;
3624 }
3625
3626 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3627
3628 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3629 * packets aren't pruned by the device's internal switch on Rx
3630 */
3631 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3632 ret = vlan_ops->add_vlan(vsi, &vlan);
3633 if (ret)
3634 goto finish;
3635
3636 	/* If all-multicast is currently enabled and this VLAN ID is the only
3637 	 * one besides VLAN 0, we have to update the look-up type of the multicast
3638 	 * promisc rule for VLAN 0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3639 */
3640 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3641 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3642 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3643 ICE_MCAST_PROMISC_BITS, 0);
3644 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3645 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3646 }
3647
3648 finish:
3649 clear_bit(ICE_CFG_BUSY, vsi->state);
3650
3651 return ret;
3652 }
3653
3654 /**
3655 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3656 * @netdev: network interface to be adjusted
3657 * @proto: VLAN TPID
3658 * @vid: VLAN ID to be removed
3659 *
3660 * net_device_ops implementation for removing VLAN IDs
3661 */
3662 static int
3663 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3664 {
3665 struct ice_netdev_priv *np = netdev_priv(netdev);
3666 struct ice_vsi_vlan_ops *vlan_ops;
3667 struct ice_vsi *vsi = np->vsi;
3668 struct ice_vlan vlan;
3669 int ret;
3670
3671 /* don't allow removal of VLAN 0 */
3672 if (!vid)
3673 return 0;
3674
3675 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3676 usleep_range(1000, 2000);
3677
3678 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3679 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3680 if (ret) {
3681 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3682 vsi->vsi_num);
3683 vsi->current_netdev_flags |= IFF_ALLMULTI;
3684 }
3685
3686 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3687
3688 /* Make sure VLAN delete is successful before updating VLAN
3689 * information
3690 */
3691 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3692 ret = vlan_ops->del_vlan(vsi, &vlan);
3693 if (ret)
3694 goto finish;
3695
3696 /* Remove multicast promisc rule for the removed VLAN ID if
3697 * all-multicast is enabled.
3698 */
3699 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3700 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3701 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3702
3703 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3704 /* Update look-up type of multicast promisc rule for VLAN 0
3705 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3706 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3707 */
3708 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3709 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3710 ICE_MCAST_VLAN_PROMISC_BITS,
3711 0);
3712 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3713 ICE_MCAST_PROMISC_BITS, 0);
3714 }
3715 }
3716
3717 finish:
3718 clear_bit(ICE_CFG_BUSY, vsi->state);
3719
3720 return ret;
3721 }
3722
3723 /**
3724  * ice_rep_indr_tc_block_unbind - unbind and free an indirect TC block callback's private data
3725 * @cb_priv: indirection block private data
3726 */
3727 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3728 {
3729 struct ice_indr_block_priv *indr_priv = cb_priv;
3730
3731 list_del(&indr_priv->list);
3732 kfree(indr_priv);
3733 }
3734
3735 /**
3736 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3737 * @vsi: VSI struct which has the netdev
3738 */
3739 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3740 {
3741 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3742
3743 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3744 ice_rep_indr_tc_block_unbind);
3745 }
3746
3747 /**
3748 * ice_tc_indir_block_register - Register TC indirect block notifications
3749 * @vsi: VSI struct which has the netdev
3750 *
3751 * Returns 0 on success, negative value on failure
3752 */
3753 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3754 {
3755 struct ice_netdev_priv *np;
3756
3757 if (!vsi || !vsi->netdev)
3758 return -EINVAL;
3759
3760 np = netdev_priv(vsi->netdev);
3761
3762 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3763 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3764 }
3765
3766 /**
3767  * ice_get_avail_q_count - Get count of available queues
3768  * @pf_qmap: bitmap to count available queues from (clear bits are available)
3769 * @lock: pointer to a mutex that protects access to pf_qmap
3770 * @size: size of the bitmap
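 *
 * For example (illustrative values), size = 4 and a qmap of 0b0110 give a
 * count of 2, since bits 0 and 3 are clear (available).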
3771 */
3772 static u16
3773 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3774 {
3775 unsigned long bit;
3776 u16 count = 0;
3777
3778 mutex_lock(lock);
3779 for_each_clear_bit(bit, pf_qmap, size)
3780 count++;
3781 mutex_unlock(lock);
3782
3783 return count;
3784 }
3785
3786 /**
3787  * ice_get_avail_txq_count - Get count of available Tx queues
3788 * @pf: pointer to an ice_pf instance
3789 */
3790 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3791 {
3792 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3793 pf->max_pf_txqs);
3794 }
3795
3796 /**
3797  * ice_get_avail_rxq_count - Get count of available Rx queues
3798 * @pf: pointer to an ice_pf instance
3799 */
3800 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3801 {
3802 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3803 pf->max_pf_rxqs);
3804 }
3805
3806 /**
3807  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3808 * @pf: board private structure to initialize
3809 */
3810 static void ice_deinit_pf(struct ice_pf *pf)
3811 {
3812 ice_service_task_stop(pf);
3813 mutex_destroy(&pf->lag_mutex);
3814 mutex_destroy(&pf->adev_mutex);
3815 mutex_destroy(&pf->sw_mutex);
3816 mutex_destroy(&pf->tc_mutex);
3817 mutex_destroy(&pf->avail_q_mutex);
3818 mutex_destroy(&pf->vfs.table_lock);
3819
3820 if (pf->avail_txqs) {
3821 bitmap_free(pf->avail_txqs);
3822 pf->avail_txqs = NULL;
3823 }
3824
3825 if (pf->avail_rxqs) {
3826 bitmap_free(pf->avail_rxqs);
3827 pf->avail_rxqs = NULL;
3828 }
3829
3830 if (pf->ptp.clock)
3831 ptp_clock_unregister(pf->ptp.clock);
3832 }
3833
3834 /**
3835  * ice_set_pf_caps - set PF capability flags
3836 * @pf: pointer to the PF instance
3837 */
3838 static void ice_set_pf_caps(struct ice_pf *pf)
3839 {
3840 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3841
3842 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3843 if (func_caps->common_cap.rdma)
3844 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3845 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3846 if (func_caps->common_cap.dcb)
3847 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3848 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3849 if (func_caps->common_cap.sr_iov_1_1) {
3850 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3851 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3852 ICE_MAX_SRIOV_VFS);
3853 }
3854 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3855 if (func_caps->common_cap.rss_table_size)
3856 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3857
3858 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3859 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3860 u16 unused;
3861
3862 /* ctrl_vsi_idx will be set to a valid value when flow director
3863 		 * is set up by ice_init_fdir
3864 */
3865 pf->ctrl_vsi_idx = ICE_NO_VSI;
3866 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3867 /* force guaranteed filter pool for PF */
3868 ice_alloc_fd_guar_item(&pf->hw, &unused,
3869 func_caps->fd_fltr_guar);
3870 /* force shared filter pool for PF */
3871 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3872 func_caps->fd_fltr_best_effort);
3873 }
3874
3875 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3876 if (func_caps->common_cap.ieee_1588)
3877 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3878
3879 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3880 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3881 }
3882
3883 /**
3884 * ice_init_pf - Initialize general software structures (struct ice_pf)
3885 * @pf: board private structure to initialize
3886 */
3887 static int ice_init_pf(struct ice_pf *pf)
3888 {
3889 ice_set_pf_caps(pf);
3890
3891 mutex_init(&pf->sw_mutex);
3892 mutex_init(&pf->tc_mutex);
3893 mutex_init(&pf->adev_mutex);
3894 mutex_init(&pf->lag_mutex);
3895
3896 INIT_HLIST_HEAD(&pf->aq_wait_list);
3897 spin_lock_init(&pf->aq_wait_lock);
3898 init_waitqueue_head(&pf->aq_wait_queue);
3899
3900 init_waitqueue_head(&pf->reset_wait_queue);
3901
3902 /* setup service timer and periodic service task */
3903 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3904 pf->serv_tmr_period = HZ;
3905 INIT_WORK(&pf->serv_task, ice_service_task);
3906 clear_bit(ICE_SERVICE_SCHED, pf->state);
3907
3908 mutex_init(&pf->avail_q_mutex);
3909 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3910 if (!pf->avail_txqs)
3911 return -ENOMEM;
3912
3913 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3914 if (!pf->avail_rxqs) {
3915 bitmap_free(pf->avail_txqs);
3916 pf->avail_txqs = NULL;
3917 return -ENOMEM;
3918 }
3919
3920 mutex_init(&pf->vfs.table_lock);
3921 hash_init(pf->vfs.table);
3922 ice_mbx_init_snapshot(&pf->hw);
3923
3924 return 0;
3925 }
3926
3927 /**
3928 * ice_is_wol_supported - check if WoL is supported
3929 * @hw: pointer to hardware info
3930 *
3931 * Check if WoL is supported based on the HW configuration.
3932 * Returns true if NVM supports and enables WoL for this port, false otherwise
3933 */
3934 bool ice_is_wol_supported(struct ice_hw *hw)
3935 {
3936 u16 wol_ctrl;
3937
3938 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3939 * word) indicates WoL is not supported on the corresponding PF ID.
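	 * For example (illustrative value), a wol_ctrl of 0x0004 would report
	 * WoL as unsupported only on the port with lport == 2.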
3940 */
3941 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3942 return false;
3943
3944 return !(BIT(hw->port_info->lport) & wol_ctrl);
3945 }
3946
3947 /**
3948 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3949 * @vsi: VSI being changed
3950 * @new_rx: new number of Rx queues
3951 * @new_tx: new number of Tx queues
3952 * @locked: is adev device_lock held
3953 *
3954  * Only change the number of queues if new_tx or new_rx is non-zero.
3955 *
3956 * Returns 0 on success.
3957 */
3958 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
3959 {
3960 struct ice_pf *pf = vsi->back;
3961 int err = 0, timeout = 50;
3962
3963 if (!new_rx && !new_tx)
3964 return -EINVAL;
3965
3966 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3967 timeout--;
3968 if (!timeout)
3969 return -EBUSY;
3970 usleep_range(1000, 2000);
3971 }
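	/* (Each failed attempt above sleeps 1-2 ms, so with timeout = 50 the
	 * bounded wait gives up after roughly 50-100 ms.)
	 */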
3972
3973 if (new_tx)
3974 vsi->req_txq = (u16)new_tx;
3975 if (new_rx)
3976 vsi->req_rxq = (u16)new_rx;
3977
3978 /* set for the next time the netdev is started */
3979 if (!netif_running(vsi->netdev)) {
3980 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3981 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3982 goto done;
3983 }
3984
3985 ice_vsi_close(vsi);
3986 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3987 ice_pf_dcb_recfg(pf, locked);
3988 ice_vsi_open(vsi);
3989 done:
3990 clear_bit(ICE_CFG_BUSY, pf->state);
3991 return err;
3992 }
3993
3994 /**
3995 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3996 * @pf: PF to configure
3997 *
3998 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3999 * VSI can still Tx/Rx VLAN tagged packets.
4000 */
4001 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4002 {
4003 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4004 struct ice_vsi_ctx *ctxt;
4005 struct ice_hw *hw;
4006 int status;
4007
4008 if (!vsi)
4009 return;
4010
4011 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4012 if (!ctxt)
4013 return;
4014
4015 hw = &pf->hw;
4016 ctxt->info = vsi->info;
4017
4018 ctxt->info.valid_sections =
4019 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4020 ICE_AQ_VSI_PROP_SECURITY_VALID |
4021 ICE_AQ_VSI_PROP_SW_VALID);
4022
4023 /* disable VLAN anti-spoof */
4024 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4025 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4026
4027 /* disable VLAN pruning and keep all other settings */
4028 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4029
4030 /* allow all VLANs on Tx and don't strip on Rx */
4031 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4032 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4033
4034 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4035 if (status) {
4036 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4037 status, ice_aq_str(hw->adminq.sq_last_status));
4038 } else {
4039 vsi->info.sec_flags = ctxt->info.sec_flags;
4040 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4041 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4042 }
4043
4044 kfree(ctxt);
4045 }
4046
4047 /**
4048 * ice_log_pkg_init - log result of DDP package load
4049 * @hw: pointer to hardware info
4050 * @state: state of package load
4051 */
4052 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4053 {
4054 struct ice_pf *pf = hw->back;
4055 struct device *dev;
4056
4057 dev = ice_pf_to_dev(pf);
4058
4059 switch (state) {
4060 case ICE_DDP_PKG_SUCCESS:
4061 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4062 hw->active_pkg_name,
4063 hw->active_pkg_ver.major,
4064 hw->active_pkg_ver.minor,
4065 hw->active_pkg_ver.update,
4066 hw->active_pkg_ver.draft);
4067 break;
4068 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4069 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4070 hw->active_pkg_name,
4071 hw->active_pkg_ver.major,
4072 hw->active_pkg_ver.minor,
4073 hw->active_pkg_ver.update,
4074 hw->active_pkg_ver.draft);
4075 break;
4076 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4077 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4078 hw->active_pkg_name,
4079 hw->active_pkg_ver.major,
4080 hw->active_pkg_ver.minor,
4081 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4082 break;
4083 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4084 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4085 hw->active_pkg_name,
4086 hw->active_pkg_ver.major,
4087 hw->active_pkg_ver.minor,
4088 hw->active_pkg_ver.update,
4089 hw->active_pkg_ver.draft,
4090 hw->pkg_name,
4091 hw->pkg_ver.major,
4092 hw->pkg_ver.minor,
4093 hw->pkg_ver.update,
4094 hw->pkg_ver.draft);
4095 break;
4096 case ICE_DDP_PKG_FW_MISMATCH:
4097 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4098 break;
4099 case ICE_DDP_PKG_INVALID_FILE:
4100 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4101 break;
4102 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4103 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4104 break;
4105 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4106 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4107 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4108 break;
4109 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4110 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4111 break;
4112 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4113 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4114 break;
4115 case ICE_DDP_PKG_LOAD_ERROR:
4116 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4117 /* poll for reset to complete */
4118 if (ice_check_reset(hw))
4119 dev_err(dev, "Error resetting device. Please reload the driver\n");
4120 break;
4121 case ICE_DDP_PKG_ERR:
4122 default:
4123 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4124 break;
4125 }
4126 }
4127
4128 /**
4129 * ice_load_pkg - load/reload the DDP Package file
4130 * @firmware: firmware structure when firmware requested or NULL for reload
4131 * @pf: pointer to the PF instance
4132 *
4133 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4134 * initialize HW tables.
4135 */
4136 static void
4137 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4138 {
4139 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4140 struct device *dev = ice_pf_to_dev(pf);
4141 struct ice_hw *hw = &pf->hw;
4142
4143 /* Load DDP Package */
4144 if (firmware && !hw->pkg_copy) {
4145 state = ice_copy_and_init_pkg(hw, firmware->data,
4146 firmware->size);
4147 ice_log_pkg_init(hw, state);
4148 } else if (!firmware && hw->pkg_copy) {
4149 /* Reload package during rebuild after CORER/GLOBR reset */
4150 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4151 ice_log_pkg_init(hw, state);
4152 } else {
4153 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4154 }
4155
4156 if (!ice_is_init_pkg_successful(state)) {
4157 /* Safe Mode */
4158 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4159 return;
4160 }
4161
4162 	/* A successful package download is the precondition for advanced
4163 	 * features, hence set the ICE_FLAG_ADV_FEATURES flag
4164 */
4165 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4166 }
4167
4168 /**
4169 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4170 * @pf: pointer to the PF structure
4171 *
4172 * There is no error returned here because the driver should be able to handle
4173 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4174 * specifically with Tx.
4175 */
4176 static void ice_verify_cacheline_size(struct ice_pf *pf)
4177 {
4178 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4179 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4180 ICE_CACHE_LINE_BYTES);
4181 }
4182
4183 /**
4184 * ice_send_version - update firmware with driver version
4185 * @pf: PF struct
4186 *
4187 * Returns 0 on success, else error code
4188 */
4189 static int ice_send_version(struct ice_pf *pf)
4190 {
4191 struct ice_driver_ver dv;
4192
4193 dv.major_ver = 0xff;
4194 dv.minor_ver = 0xff;
4195 dv.build_ver = 0xff;
4196 dv.subbuild_ver = 0;
4197 strscpy((char *)dv.driver_string, UTS_RELEASE,
4198 sizeof(dv.driver_string));
4199 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4200 }
4201
4202 /**
4203 * ice_init_fdir - Initialize flow director VSI and configuration
4204 * @pf: pointer to the PF instance
4205 *
4206 * returns 0 on success, negative on error
4207 */
4208 static int ice_init_fdir(struct ice_pf *pf)
4209 {
4210 struct device *dev = ice_pf_to_dev(pf);
4211 struct ice_vsi *ctrl_vsi;
4212 int err;
4213
4214 /* Side Band Flow Director needs to have a control VSI.
4215 * Allocate it and store it in the PF.
4216 */
4217 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4218 if (!ctrl_vsi) {
4219 dev_dbg(dev, "could not create control VSI\n");
4220 return -ENOMEM;
4221 }
4222
4223 err = ice_vsi_open_ctrl(ctrl_vsi);
4224 if (err) {
4225 dev_dbg(dev, "could not open control VSI\n");
4226 goto err_vsi_open;
4227 }
4228
4229 mutex_init(&pf->hw.fdir_fltr_lock);
4230
4231 err = ice_fdir_create_dflt_rules(pf);
4232 if (err)
4233 goto err_fdir_rule;
4234
4235 return 0;
4236
4237 err_fdir_rule:
4238 ice_fdir_release_flows(&pf->hw);
4239 ice_vsi_close(ctrl_vsi);
4240 err_vsi_open:
4241 ice_vsi_release(ctrl_vsi);
4242 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4243 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4244 pf->ctrl_vsi_idx = ICE_NO_VSI;
4245 }
4246 return err;
4247 }
4248
4249 static void ice_deinit_fdir(struct ice_pf *pf)
4250 {
4251 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4252
4253 if (!vsi)
4254 return;
4255
4256 ice_vsi_manage_fdir(vsi, false);
4257 ice_vsi_release(vsi);
4258 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4259 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4260 pf->ctrl_vsi_idx = ICE_NO_VSI;
4261 }
4262
4263 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4264 }
4265
4266 /**
4267 * ice_get_opt_fw_name - return optional firmware file name or NULL
4268 * @pf: pointer to the PF instance
4269 */
4270 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4271 {
4272 	/* Optional firmware name is the same as the default with an additional
4273 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
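	 * (e.g. an illustrative DSN of 0x0123456789abcdef would yield
	 * "intel/ice/ddp/ice-0123456789abcdef.pkg").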
4274 */
4275 struct pci_dev *pdev = pf->pdev;
4276 char *opt_fw_filename;
4277 u64 dsn;
4278
4279 /* Determine the name of the optional file using the DSN (two
4280 * dwords following the start of the DSN Capability).
4281 */
4282 dsn = pci_get_dsn(pdev);
4283 if (!dsn)
4284 return NULL;
4285
4286 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4287 if (!opt_fw_filename)
4288 return NULL;
4289
4290 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4291 ICE_DDP_PKG_PATH, dsn);
4292
4293 return opt_fw_filename;
4294 }
4295
4296 /**
4297 * ice_request_fw - Device initialization routine
4298 * @pf: pointer to the PF instance
4299 */
4300 static void ice_request_fw(struct ice_pf *pf)
4301 {
4302 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4303 const struct firmware *firmware = NULL;
4304 struct device *dev = ice_pf_to_dev(pf);
4305 int err = 0;
4306
4307 /* optional device-specific DDP (if present) overrides the default DDP
4308 	 * package file. The kernel logs a debug message if the file doesn't
4309 	 * exist, and warning messages for other errors.
4310 */
4311 if (opt_fw_filename) {
4312 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4313 if (err) {
4314 kfree(opt_fw_filename);
4315 goto dflt_pkg_load;
4316 }
4317
4318 /* request for firmware was successful. Download to device */
4319 ice_load_pkg(firmware, pf);
4320 kfree(opt_fw_filename);
4321 release_firmware(firmware);
4322 return;
4323 }
4324
4325 dflt_pkg_load:
4326 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4327 if (err) {
4328 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4329 return;
4330 }
4331
4332 /* request for firmware was successful. Download to device */
4333 ice_load_pkg(firmware, pf);
4334 release_firmware(firmware);
4335 }
4336
4337 /**
4338 * ice_print_wake_reason - show the wake up cause in the log
4339 * @pf: pointer to the PF struct
4340 */
4341 static void ice_print_wake_reason(struct ice_pf *pf)
4342 {
4343 u32 wus = pf->wakeup_reason;
4344 const char *wake_str;
4345
4346 /* if no wake event, nothing to print */
4347 if (!wus)
4348 return;
4349
4350 if (wus & PFPM_WUS_LNKC_M)
4351 wake_str = "Link\n";
4352 else if (wus & PFPM_WUS_MAG_M)
4353 wake_str = "Magic Packet\n";
4354 else if (wus & PFPM_WUS_MNG_M)
4355 wake_str = "Management\n";
4356 else if (wus & PFPM_WUS_FW_RST_WK_M)
4357 wake_str = "Firmware Reset\n";
4358 else
4359 wake_str = "Unknown\n";
4360
4361 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4362 }
4363
4364 /**
4365 * ice_register_netdev - register netdev
4366 * @vsi: pointer to the VSI struct
4367 */
4368 static int ice_register_netdev(struct ice_vsi *vsi)
4369 {
4370 int err;
4371
4372 if (!vsi || !vsi->netdev)
4373 return -EIO;
4374
4375 err = register_netdev(vsi->netdev);
4376 if (err)
4377 return err;
4378
4379 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4380 netif_carrier_off(vsi->netdev);
4381 netif_tx_stop_all_queues(vsi->netdev);
4382
4383 return 0;
4384 }
4385
4386 static void ice_unregister_netdev(struct ice_vsi *vsi)
4387 {
4388 if (!vsi || !vsi->netdev)
4389 return;
4390
4391 unregister_netdev(vsi->netdev);
4392 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4393 }
4394
4395 /**
4396 * ice_cfg_netdev - Allocate, configure and register a netdev
4397 * @vsi: the VSI associated with the new netdev
4398 *
4399 * Returns 0 on success, negative value on failure
4400 */
4401 static int ice_cfg_netdev(struct ice_vsi *vsi)
4402 {
4403 struct ice_netdev_priv *np;
4404 struct net_device *netdev;
4405 u8 mac_addr[ETH_ALEN];
4406
4407 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4408 vsi->alloc_rxq);
4409 if (!netdev)
4410 return -ENOMEM;
4411
4412 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4413 vsi->netdev = netdev;
4414 np = netdev_priv(netdev);
4415 np->vsi = vsi;
4416
4417 ice_set_netdev_features(netdev);
4418 ice_set_ops(vsi);
4419
4420 if (vsi->type == ICE_VSI_PF) {
4421 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4422 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4423 eth_hw_addr_set(netdev, mac_addr);
4424 }
4425
4426 netdev->priv_flags |= IFF_UNICAST_FLT;
4427
4428 /* Setup netdev TC information */
4429 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4430
4431 netdev->max_mtu = ICE_MAX_MTU;
4432
4433 return 0;
4434 }
4435
4436 static void ice_decfg_netdev(struct ice_vsi *vsi)
4437 {
4438 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4439 free_netdev(vsi->netdev);
4440 vsi->netdev = NULL;
4441 }
4442
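/**
 * ice_start_eth - set up MAC filters and open the given VSI
 * @vsi: pointer to the VSI to start
 *
 * Returns 0 on success, negative value on failure. If opening the VSI
 * fails, the filters added here are removed again.
 */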
4443 static int ice_start_eth(struct ice_vsi *vsi)
4444 {
4445 int err;
4446
4447 err = ice_init_mac_fltr(vsi->back);
4448 if (err)
4449 return err;
4450
4451 err = ice_vsi_open(vsi);
4452 if (err)
4453 ice_fltr_remove_all(vsi);
4454
4455 return err;
4456 }
4457
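/**
 * ice_stop_eth - remove all filters and close the given VSI
 * @vsi: pointer to the VSI to stop
 */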
4458 static void ice_stop_eth(struct ice_vsi *vsi)
4459 {
4460 ice_fltr_remove_all(vsi);
4461 ice_vsi_close(vsi);
4462 }
4463
4464 static int ice_init_eth(struct ice_pf *pf)
4465 {
4466 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4467 int err;
4468
4469 if (!vsi)
4470 return -EINVAL;
4471
4472 /* init channel list */
4473 INIT_LIST_HEAD(&vsi->ch_list);
4474
4475 err = ice_cfg_netdev(vsi);
4476 if (err)
4477 return err;
4478 /* Setup DCB netlink interface */
4479 ice_dcbnl_setup(vsi);
4480
4481 err = ice_init_mac_fltr(pf);
4482 if (err)
4483 goto err_init_mac_fltr;
4484
4485 err = ice_devlink_create_pf_port(pf);
4486 if (err)
4487 goto err_devlink_create_pf_port;
4488
4489 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4490
4491 err = ice_register_netdev(vsi);
4492 if (err)
4493 goto err_register_netdev;
4494
4495 err = ice_tc_indir_block_register(vsi);
4496 if (err)
4497 goto err_tc_indir_block_register;
4498
4499 ice_napi_add(vsi);
4500
4501 return 0;
4502
4503 err_tc_indir_block_register:
4504 ice_unregister_netdev(vsi);
4505 err_register_netdev:
4506 ice_devlink_destroy_pf_port(pf);
4507 err_devlink_create_pf_port:
4508 err_init_mac_fltr:
4509 ice_decfg_netdev(vsi);
4510 return err;
4511 }
4512
4513 static void ice_deinit_eth(struct ice_pf *pf)
4514 {
4515 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4516
4517 if (!vsi)
4518 return;
4519
4520 ice_vsi_close(vsi);
4521 ice_unregister_netdev(vsi);
4522 ice_devlink_destroy_pf_port(pf);
4523 ice_tc_indir_block_unregister(vsi);
4524 ice_decfg_netdev(vsi);
4525 }
4526
4527 /**
4528 * ice_wait_for_fw - wait for full FW readiness
4529 * @hw: pointer to the hardware structure
4530 * @timeout: milliseconds that can elapse before timing out
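 *
 * Polls the GL_MNG_FWSM firmware-loading bit roughly every 100 ms, so e.g.
 * the 30000 ms timeout used below allows about 300 polls before giving up.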
4531 */
4532 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4533 {
4534 int fw_loading;
4535 u32 elapsed = 0;
4536
4537 while (elapsed <= timeout) {
4538 fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4539
4540 /* firmware was not yet loaded, we have to wait more */
4541 if (fw_loading) {
4542 elapsed += 100;
4543 msleep(100);
4544 continue;
4545 }
4546 return 0;
4547 }
4548
4549 return -ETIMEDOUT;
4550 }
4551
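/**
 * ice_init_dev - initialize HW, load the DDP package and set up interrupts
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure
 */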
4552 static int ice_init_dev(struct ice_pf *pf)
4553 {
4554 struct device *dev = ice_pf_to_dev(pf);
4555 struct ice_hw *hw = &pf->hw;
4556 int err;
4557
4558 err = ice_init_hw(hw);
4559 if (err) {
4560 dev_err(dev, "ice_init_hw failed: %d\n", err);
4561 return err;
4562 }
4563
4564 /* Some cards require longer initialization times
4565 	 * due to the necessity of loading FW from an external source.
4566 	 * This can take up to half a minute.
4567 */
4568 if (ice_is_pf_c827(hw)) {
4569 err = ice_wait_for_fw(hw, 30000);
4570 if (err) {
4571 			dev_err(dev, "ice_wait_for_fw timed out\n");
4572 return err;
4573 }
4574 }
4575
4576 ice_init_feature_support(pf);
4577
4578 ice_request_fw(pf);
4579
4580 	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4581 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4582 * true
4583 */
4584 if (ice_is_safe_mode(pf)) {
4585 /* we already got function/device capabilities but these don't
4586 * reflect what the driver needs to do in safe mode. Instead of
4587 * adding conditional logic everywhere to ignore these
4588 * device/function capabilities, override them.
4589 */
4590 ice_set_safe_mode_caps(hw);
4591 }
4592
4593 err = ice_init_pf(pf);
4594 if (err) {
4595 dev_err(dev, "ice_init_pf failed: %d\n", err);
4596 goto err_init_pf;
4597 }
4598
4599 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4600 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4601 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4602 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4603 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4604 pf->hw.udp_tunnel_nic.tables[0].n_entries =
4605 pf->hw.tnl.valid_count[TNL_VXLAN];
4606 pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4607 UDP_TUNNEL_TYPE_VXLAN;
4608 }
4609 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4610 pf->hw.udp_tunnel_nic.tables[1].n_entries =
4611 pf->hw.tnl.valid_count[TNL_GENEVE];
4612 pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4613 UDP_TUNNEL_TYPE_GENEVE;
4614 }
4615
4616 err = ice_init_interrupt_scheme(pf);
4617 if (err) {
4618 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4619 err = -EIO;
4620 goto err_init_interrupt_scheme;
4621 }
4622
4623 /* In case of MSIX we are going to setup the misc vector right here
4624 * to handle admin queue events etc. In case of legacy and MSI
4625 * the misc functionality and queue processing is combined in
4626 * the same vector and that gets setup at open.
4627 */
4628 err = ice_req_irq_msix_misc(pf);
4629 if (err) {
4630 dev_err(dev, "setup of misc vector failed: %d\n", err);
4631 goto err_req_irq_msix_misc;
4632 }
4633
4634 return 0;
4635
4636 err_req_irq_msix_misc:
4637 ice_clear_interrupt_scheme(pf);
4638 err_init_interrupt_scheme:
4639 ice_deinit_pf(pf);
4640 err_init_pf:
4641 ice_deinit_hw(hw);
4642 return err;
4643 }
4644
4645 static void ice_deinit_dev(struct ice_pf *pf)
4646 {
4647 ice_free_irq_msix_misc(pf);
4648 ice_deinit_pf(pf);
4649 ice_deinit_hw(&pf->hw);
4650
4651 /* Service task is already stopped, so call reset directly. */
4652 ice_reset(&pf->hw, ICE_RESET_PFR);
4653 pci_wait_for_pending_transaction(pf->pdev);
4654 ice_clear_interrupt_scheme(pf);
4655 }
4656
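/**
 * ice_init_features - initialize optional, DDP-driven PF features
 * @pf: pointer to the PF instance
 */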
4657 static void ice_init_features(struct ice_pf *pf)
4658 {
4659 struct device *dev = ice_pf_to_dev(pf);
4660
4661 if (ice_is_safe_mode(pf))
4662 return;
4663
4664 /* initialize DDP driven features */
4665 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4666 ice_ptp_init(pf);
4667
4668 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4669 ice_gnss_init(pf);
4670
4671 /* Note: Flow director init failure is non-fatal to load */
4672 if (ice_init_fdir(pf))
4673 dev_err(dev, "could not initialize flow director\n");
4674
4675 /* Note: DCB init failure is non-fatal to load */
4676 if (ice_init_pf_dcb(pf, false)) {
4677 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4678 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4679 } else {
4680 ice_cfg_lldp_mib_change(&pf->hw, true);
4681 }
4682
4683 if (ice_init_lag(pf))
4684 dev_warn(dev, "Failed to init link aggregation support\n");
4685 }
4686
4687 static void ice_deinit_features(struct ice_pf *pf)
4688 {
4689 if (ice_is_safe_mode(pf))
4690 return;
4691
4692 ice_deinit_lag(pf);
4693 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4694 ice_cfg_lldp_mib_change(&pf->hw, false);
4695 ice_deinit_fdir(pf);
4696 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4697 ice_gnss_exit(pf);
4698 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4699 ice_ptp_release(pf);
4700 }
4701
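/**
 * ice_init_wakeup - save and clear the wake status, disable WoL by default
 * @pf: pointer to the PF instance
 */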
4702 static void ice_init_wakeup(struct ice_pf *pf)
4703 {
4704 /* Save wakeup reason register for later use */
4705 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4706
4707 /* check for a power management event */
4708 ice_print_wake_reason(pf);
4709
4710 /* clear wake status, all bits */
4711 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4712
4713 /* Disable WoL at init, wait for user to enable */
4714 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4715 }
4716
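/**
 * ice_init_link - set up link events and initial PHY configuration
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure
 */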
4717 static int ice_init_link(struct ice_pf *pf)
4718 {
4719 struct device *dev = ice_pf_to_dev(pf);
4720 int err;
4721
4722 err = ice_init_link_events(pf->hw.port_info);
4723 if (err) {
4724 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4725 return err;
4726 }
4727
4728 /* not a fatal error if this fails */
4729 err = ice_init_nvm_phy_type(pf->hw.port_info);
4730 if (err)
4731 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4732
4733 /* not a fatal error if this fails */
4734 err = ice_update_link_info(pf->hw.port_info);
4735 if (err)
4736 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4737
4738 ice_init_link_dflt_override(pf->hw.port_info);
4739
4740 ice_check_link_cfg_err(pf,
4741 pf->hw.port_info->phy.link_info.link_cfg_err);
4742
4743 /* if media available, initialize PHY settings */
4744 if (pf->hw.port_info->phy.link_info.link_info &
4745 ICE_AQ_MEDIA_AVAILABLE) {
4746 /* not a fatal error if this fails */
4747 err = ice_init_phy_user_cfg(pf->hw.port_info);
4748 if (err)
4749 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4750
4751 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4752 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4753
4754 if (vsi)
4755 ice_configure_phy(vsi);
4756 }
4757 } else {
4758 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4759 }
4760
4761 return err;
4762 }
4763
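/**
 * ice_init_pf_sw - set up the PF switch structure and main PF VSI
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure
 */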
4764 static int ice_init_pf_sw(struct ice_pf *pf)
4765 {
4766 bool dvm = ice_is_dvm_ena(&pf->hw);
4767 struct ice_vsi *vsi;
4768 int err;
4769
4770 /* create switch struct for the switch element created by FW on boot */
4771 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4772 if (!pf->first_sw)
4773 return -ENOMEM;
4774
4775 if (pf->hw.evb_veb)
4776 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4777 else
4778 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4779
4780 pf->first_sw->pf = pf;
4781
4782 /* record the sw_id available for later use */
4783 pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4784
4785 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4786 if (err)
4787 goto err_aq_set_port_params;
4788
4789 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4790 if (!vsi) {
4791 err = -ENOMEM;
4792 goto err_pf_vsi_setup;
4793 }
4794
4795 return 0;
4796
4797 err_pf_vsi_setup:
4798 err_aq_set_port_params:
4799 kfree(pf->first_sw);
4800 return err;
4801 }
4802
4803 static void ice_deinit_pf_sw(struct ice_pf *pf)
4804 {
4805 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4806
4807 if (!vsi)
4808 return;
4809
4810 ice_vsi_release(vsi);
4811 kfree(pf->first_sw);
4812 }
4813
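/**
 * ice_alloc_vsis - allocate the PF's VSI array and per-VSI stats
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure
 */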
4814 static int ice_alloc_vsis(struct ice_pf *pf)
4815 {
4816 struct device *dev = ice_pf_to_dev(pf);
4817
4818 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4819 if (!pf->num_alloc_vsi)
4820 return -EIO;
4821
4822 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4823 dev_warn(dev,
4824 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4825 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4826 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4827 }
4828
4829 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4830 GFP_KERNEL);
4831 if (!pf->vsi)
4832 return -ENOMEM;
4833
4834 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4835 sizeof(*pf->vsi_stats), GFP_KERNEL);
4836 if (!pf->vsi_stats) {
4837 devm_kfree(dev, pf->vsi);
4838 return -ENOMEM;
4839 }
4840
4841 return 0;
4842 }
4843
4844 static void ice_dealloc_vsis(struct ice_pf *pf)
4845 {
4846 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4847 pf->vsi_stats = NULL;
4848
4849 pf->num_alloc_vsi = 0;
4850 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4851 pf->vsi = NULL;
4852 }
4853
4854 static int ice_init_devlink(struct ice_pf *pf)
4855 {
4856 int err;
4857
4858 err = ice_devlink_register_params(pf);
4859 if (err)
4860 return err;
4861
4862 ice_devlink_init_regions(pf);
4863 ice_devlink_register(pf);
4864
4865 return 0;
4866 }
4867
4868 static void ice_deinit_devlink(struct ice_pf *pf)
4869 {
4870 ice_devlink_unregister(pf);
4871 ice_devlink_destroy_regions(pf);
4872 ice_devlink_unregister_params(pf);
4873 }
4874
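/**
 * ice_init - perform common PF initialization during probe
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure
 */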
4875 static int ice_init(struct ice_pf *pf)
4876 {
4877 int err;
4878
4879 err = ice_init_dev(pf);
4880 if (err)
4881 return err;
4882
4883 err = ice_alloc_vsis(pf);
4884 if (err)
4885 goto err_alloc_vsis;
4886
4887 err = ice_init_pf_sw(pf);
4888 if (err)
4889 goto err_init_pf_sw;
4890
4891 ice_init_wakeup(pf);
4892
4893 err = ice_init_link(pf);
4894 if (err)
4895 goto err_init_link;
4896
4897 err = ice_send_version(pf);
4898 if (err)
4899 goto err_init_link;
4900
4901 ice_verify_cacheline_size(pf);
4902
4903 if (ice_is_safe_mode(pf))
4904 ice_set_safe_mode_vlan_cfg(pf);
4905 else
4906 /* print PCI link speed and width */
4907 pcie_print_link_status(pf->pdev);
4908
4909 /* ready to go, so clear down state bit */
4910 clear_bit(ICE_DOWN, pf->state);
4911 clear_bit(ICE_SERVICE_DIS, pf->state);
4912
4913 /* since everything is good, start the service timer */
4914 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4915
4916 return 0;
4917
4918 err_init_link:
4919 ice_deinit_pf_sw(pf);
4920 err_init_pf_sw:
4921 ice_dealloc_vsis(pf);
4922 err_alloc_vsis:
4923 ice_deinit_dev(pf);
4924 return err;
4925 }
4926
4927 static void ice_deinit(struct ice_pf *pf)
4928 {
4929 set_bit(ICE_SERVICE_DIS, pf->state);
4930 set_bit(ICE_DOWN, pf->state);
4931
4932 ice_deinit_pf_sw(pf);
4933 ice_dealloc_vsis(pf);
4934 ice_deinit_dev(pf);
4935 }
4936
4937 /**
4938  * ice_load - load the PF by initializing HW and starting the VSI
4939  * @pf: pointer to the PF instance
4940 */
4941 int ice_load(struct ice_pf *pf)
4942 {
4943 struct ice_vsi_cfg_params params = {};
4944 struct ice_vsi *vsi;
4945 int err;
4946
4947 err = ice_init_dev(pf);
4948 if (err)
4949 return err;
4950
4951 vsi = ice_get_main_vsi(pf);
4952
4953 params = ice_vsi_to_params(vsi);
4954 params.flags = ICE_VSI_FLAG_INIT;
4955
4956 rtnl_lock();
4957 	err = ice_vsi_cfg(vsi, &params);
4958 if (err)
4959 goto err_vsi_cfg;
4960
4961 err = ice_start_eth(ice_get_main_vsi(pf));
4962 if (err)
4963 goto err_start_eth;
4964 rtnl_unlock();
4965
4966 err = ice_init_rdma(pf);
4967 if (err)
4968 goto err_init_rdma;
4969
4970 ice_init_features(pf);
4971 ice_service_task_restart(pf);
4972
4973 clear_bit(ICE_DOWN, pf->state);
4974
4975 return 0;
4976
4977 err_init_rdma:
4978 ice_vsi_close(ice_get_main_vsi(pf));
4979 rtnl_lock();
4980 err_start_eth:
4981 ice_vsi_decfg(ice_get_main_vsi(pf));
4982 err_vsi_cfg:
4983 rtnl_unlock();
4984 ice_deinit_dev(pf);
4985 return err;
4986 }
4987
4988 /**
4989  * ice_unload - unload the PF by stopping the VSI and deinitializing HW
4990  * @pf: pointer to the PF instance
4991 */
4992 void ice_unload(struct ice_pf *pf)
4993 {
4994 ice_deinit_features(pf);
4995 ice_deinit_rdma(pf);
4996 rtnl_lock();
4997 ice_stop_eth(ice_get_main_vsi(pf));
4998 ice_vsi_decfg(ice_get_main_vsi(pf));
4999 rtnl_unlock();
5000 ice_deinit_dev(pf);
5001 }
5002
5003 /**
5004 * ice_probe - Device initialization routine
5005 * @pdev: PCI device information struct
5006 * @ent: entry in ice_pci_tbl
5007 *
5008 * Returns 0 on success, negative on failure
5009 */
5010 static int
5011 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5012 {
5013 struct device *dev = &pdev->dev;
5014 struct ice_pf *pf;
5015 struct ice_hw *hw;
5016 int err;
5017
5018 if (pdev->is_virtfn) {
5019 dev_err(dev, "can't probe a virtual function\n");
5020 return -EINVAL;
5021 }
5022
5023 /* when under a kdump kernel initiate a reset before enabling the
5024 * device in order to clear out any pending DMA transactions. These
5025 * transactions can cause some systems to machine check when doing
5026 * the pcim_enable_device() below.
5027 */
5028 if (is_kdump_kernel()) {
5029 pci_save_state(pdev);
5030 pci_clear_master(pdev);
5031 err = pcie_flr(pdev);
5032 if (err)
5033 return err;
5034 pci_restore_state(pdev);
5035 }
5036
5037 /* this driver uses devres, see
5038 * Documentation/driver-api/driver-model/devres.rst
5039 */
5040 err = pcim_enable_device(pdev);
5041 if (err)
5042 return err;
5043
5044 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5045 if (err) {
5046 dev_err(dev, "BAR0 I/O map error %d\n", err);
5047 return err;
5048 }
5049
5050 pf = ice_allocate_pf(dev);
5051 if (!pf)
5052 return -ENOMEM;
5053
5054 /* initialize Auxiliary index to invalid value */
5055 pf->aux_idx = -1;
5056
5057 /* set up for high or low DMA */
5058 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5059 if (err) {
5060 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5061 return err;
5062 }
5063
5064 pci_set_master(pdev);
5065
5066 pf->pdev = pdev;
5067 pci_set_drvdata(pdev, pf);
5068 set_bit(ICE_DOWN, pf->state);
5069 /* Disable service task until DOWN bit is cleared */
5070 set_bit(ICE_SERVICE_DIS, pf->state);
5071
5072 hw = &pf->hw;
5073 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5074 pci_save_state(pdev);
5075
5076 hw->back = pf;
5077 hw->port_info = NULL;
5078 hw->vendor_id = pdev->vendor;
5079 hw->device_id = pdev->device;
5080 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5081 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5082 hw->subsystem_device_id = pdev->subsystem_device;
5083 hw->bus.device = PCI_SLOT(pdev->devfn);
5084 hw->bus.func = PCI_FUNC(pdev->devfn);
5085 ice_set_ctrlq_len(hw);
5086
5087 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5088
5089 #ifndef CONFIG_DYNAMIC_DEBUG
5090 if (debug < -1)
5091 hw->debug_mask = debug;
5092 #endif
5093
5094 err = ice_init(pf);
5095 if (err)
5096 goto err_init;
5097
5098 err = ice_init_eth(pf);
5099 if (err)
5100 goto err_init_eth;
5101
5102 err = ice_init_rdma(pf);
5103 if (err)
5104 goto err_init_rdma;
5105
5106 err = ice_init_devlink(pf);
5107 if (err)
5108 goto err_init_devlink;
5109
5110 ice_init_features(pf);
5111
5112 return 0;
5113
5114 err_init_devlink:
5115 ice_deinit_rdma(pf);
5116 err_init_rdma:
5117 ice_deinit_eth(pf);
5118 err_init_eth:
5119 ice_deinit(pf);
5120 err_init:
5121 pci_disable_device(pdev);
5122 return err;
5123 }
5124
5125 /**
5126 * ice_set_wake - enable or disable Wake on LAN
5127 * @pf: pointer to the PF struct
5128 *
5129 * Simple helper for WoL control
5130 */
5131 static void ice_set_wake(struct ice_pf *pf)
5132 {
5133 struct ice_hw *hw = &pf->hw;
5134 bool wol = pf->wol_ena;
5135
5136 /* clear wake state, otherwise new wake events won't fire */
5137 wr32(hw, PFPM_WUS, U32_MAX);
5138
5139 /* enable / disable APM wake up, no RMW needed */
5140 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5141
5142 /* set magic packet filter enabled */
5143 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5144 }
5145
5146 /**
5147 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5148 * @pf: pointer to the PF struct
5149 *
5150 * Issue firmware command to enable multicast magic wake, making
5151 * sure that any locally administered address (LAA) is used for
5152 * wake, and that PF reset doesn't undo the LAA.
5153 */
5154 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5155 {
5156 struct device *dev = ice_pf_to_dev(pf);
5157 struct ice_hw *hw = &pf->hw;
5158 u8 mac_addr[ETH_ALEN];
5159 struct ice_vsi *vsi;
5160 int status;
5161 u8 flags;
5162
5163 if (!pf->wol_ena)
5164 return;
5165
5166 vsi = ice_get_main_vsi(pf);
5167 if (!vsi)
5168 return;
5169
5170 /* Get current MAC address in case it's an LAA */
5171 if (vsi->netdev)
5172 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5173 else
5174 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5175
5176 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5177 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5178 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5179
5180 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5181 if (status)
5182 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5183 status, ice_aq_str(hw->adminq.sq_last_status));
5184 }
5185
5186 /**
5187 * ice_remove - Device removal routine
5188 * @pdev: PCI device information struct
5189 */
5190 static void ice_remove(struct pci_dev *pdev)
5191 {
5192 struct ice_pf *pf = pci_get_drvdata(pdev);
5193 int i;
5194
5195 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5196 if (!ice_is_reset_in_progress(pf->state))
5197 break;
5198 msleep(100);
5199 }
5200
5201 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5202 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5203 ice_free_vfs(pf);
5204 }
5205
5206 ice_service_task_stop(pf);
5207 ice_aq_cancel_waiting_tasks(pf);
5208 set_bit(ICE_DOWN, pf->state);
5209
5210 if (!ice_is_safe_mode(pf))
5211 ice_remove_arfs(pf);
5212 ice_deinit_features(pf);
5213 ice_deinit_devlink(pf);
5214 ice_deinit_rdma(pf);
5215 ice_deinit_eth(pf);
5216 ice_deinit(pf);
5217
5218 ice_vsi_release_all(pf);
5219
5220 ice_setup_mc_magic_wake(pf);
5221 ice_set_wake(pf);
5222
5223 pci_disable_device(pdev);
5224 }
5225
5226 /**
5227 * ice_shutdown - PCI callback for shutting down device
5228 * @pdev: PCI device information struct
5229 */
5230 static void ice_shutdown(struct pci_dev *pdev)
5231 {
5232 struct ice_pf *pf = pci_get_drvdata(pdev);
5233
5234 ice_remove(pdev);
5235
5236 if (system_state == SYSTEM_POWER_OFF) {
5237 pci_wake_from_d3(pdev, pf->wol_ena);
5238 pci_set_power_state(pdev, PCI_D3hot);
5239 }
5240 }
5241
5242 #ifdef CONFIG_PM
5243 /**
5244 * ice_prepare_for_shutdown - prep for PCI shutdown
5245 * @pf: board private structure
5246 *
5247 * Inform or close all dependent features in prep for PCI device shutdown
5248 */
5249 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5250 {
5251 struct ice_hw *hw = &pf->hw;
5252 u32 v;
5253
5254 /* Notify VFs of impending reset */
5255 if (ice_check_sq_alive(hw, &hw->mailboxq))
5256 ice_vc_notify_reset(pf);
5257
5258 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5259
5260 /* disable the VSIs and their queues that are not already DOWN */
5261 ice_pf_dis_all_vsi(pf, false);
5262
5263 ice_for_each_vsi(pf, v)
5264 if (pf->vsi[v])
5265 pf->vsi[v]->vsi_num = 0;
5266
5267 ice_shutdown_all_ctrlq(hw);
5268 }
5269
5270 /**
5271 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5272 * @pf: board private structure to reinitialize
5273 *
5274  * This routine reinitializes the interrupt scheme that was cleared during
5275  * the power management suspend callback.
5276 *
5277 * This should be called during resume routine to re-allocate the q_vectors
5278 * and reacquire interrupts.
5279 */
5280 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5281 {
5282 struct device *dev = ice_pf_to_dev(pf);
5283 int ret, v;
5284
5285 /* Since we clear MSIX flag during suspend, we need to
5286 * set it back during resume...
5287 */
5288
5289 ret = ice_init_interrupt_scheme(pf);
5290 if (ret) {
5291 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5292 return ret;
5293 }
5294
5295 	/* Remap vectors and rings after successfully re-initializing interrupts */
5296 ice_for_each_vsi(pf, v) {
5297 if (!pf->vsi[v])
5298 continue;
5299
5300 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5301 if (ret)
5302 goto err_reinit;
5303 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5304 }
5305
5306 ret = ice_req_irq_msix_misc(pf);
5307 if (ret) {
5308 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5309 ret);
5310 goto err_reinit;
5311 }
5312
5313 return 0;
5314
5315 err_reinit:
5316 while (v--)
5317 if (pf->vsi[v])
5318 ice_vsi_free_q_vectors(pf->vsi[v]);
5319
5320 return ret;
5321 }
5322
5323 /**
5324  * ice_suspend - PM callback for suspending the device
5325 * @dev: generic device information structure
5326 *
5327 * Power Management callback to quiesce the device and prepare
5328 * for D3 transition.
5329 */
5330 static int __maybe_unused ice_suspend(struct device *dev)
5331 {
5332 struct pci_dev *pdev = to_pci_dev(dev);
5333 struct ice_pf *pf;
5334 int disabled, v;
5335
5336 pf = pci_get_drvdata(pdev);
5337
5338 if (!ice_pf_state_is_nominal(pf)) {
5339 dev_err(dev, "Device is not ready, no need to suspend it\n");
5340 return -EBUSY;
5341 }
5342
5343 /* Stop watchdog tasks until resume completion.
5344 * Even though it is most likely that the service task is
5345 * disabled if the device is suspended or down, the service task's
5346 * state is controlled by a different state bit, and we should
5347 * store and honor whatever state that bit is in at this point.
5348 */
5349 disabled = ice_service_task_stop(pf);
5350
5351 ice_deinit_rdma(pf);
5352
5353 	/* Already suspended? Then there is nothing to do */
5354 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5355 if (!disabled)
5356 ice_service_task_restart(pf);
5357 return 0;
5358 }
5359
5360 if (test_bit(ICE_DOWN, pf->state) ||
5361 ice_is_reset_in_progress(pf->state)) {
5362 dev_err(dev, "can't suspend device in reset or already down\n");
5363 if (!disabled)
5364 ice_service_task_restart(pf);
5365 return 0;
5366 }
5367
5368 ice_setup_mc_magic_wake(pf);
5369
5370 ice_prepare_for_shutdown(pf);
5371
5372 ice_set_wake(pf);
5373
5374 /* Free vectors, clear the interrupt scheme and release IRQs
5375 * for proper hibernation, especially with large number of CPUs.
5376 * Otherwise hibernation might fail when mapping all the vectors back
5377 * to CPU0.
5378 */
5379 ice_free_irq_msix_misc(pf);
5380 ice_for_each_vsi(pf, v) {
5381 if (!pf->vsi[v])
5382 continue;
5383 ice_vsi_free_q_vectors(pf->vsi[v]);
5384 }
5385 ice_clear_interrupt_scheme(pf);
5386
5387 pci_save_state(pdev);
5388 pci_wake_from_d3(pdev, pf->wol_ena);
5389 pci_set_power_state(pdev, PCI_D3hot);
5390 return 0;
5391 }
5392
5393 /**
5394 * ice_resume - PM callback for waking up from D3
5395 * @dev: generic device information structure
5396 */
5397 static int __maybe_unused ice_resume(struct device *dev)
5398 {
5399 struct pci_dev *pdev = to_pci_dev(dev);
5400 enum ice_reset_req reset_type;
5401 struct ice_pf *pf;
5402 struct ice_hw *hw;
5403 int ret;
5404
5405 pci_set_power_state(pdev, PCI_D0);
5406 pci_restore_state(pdev);
5407 pci_save_state(pdev);
5408
5409 if (!pci_device_is_present(pdev))
5410 return -ENODEV;
5411
5412 ret = pci_enable_device_mem(pdev);
5413 if (ret) {
5414 dev_err(dev, "Cannot enable device after suspend\n");
5415 return ret;
5416 }
5417
5418 pf = pci_get_drvdata(pdev);
5419 hw = &pf->hw;
5420
5421 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5422 ice_print_wake_reason(pf);
5423
5424 /* We cleared the interrupt scheme when we suspended, so we need to
5425 * restore it now to resume device functionality.
5426 */
5427 ret = ice_reinit_interrupt_scheme(pf);
5428 if (ret)
5429 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5430
5431 ret = ice_init_rdma(pf);
5432 if (ret)
5433 dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5434 ret);
5435
5436 clear_bit(ICE_DOWN, pf->state);
5437 /* Now perform PF reset and rebuild */
5438 reset_type = ICE_RESET_PFR;
5439 /* re-enable service task for reset, but allow reset to schedule it */
5440 clear_bit(ICE_SERVICE_DIS, pf->state);
5441
5442 if (ice_schedule_reset(pf, reset_type))
5443 dev_err(dev, "Reset during resume failed.\n");
5444
5445 clear_bit(ICE_SUSPENDED, pf->state);
5446 ice_service_task_restart(pf);
5447
5448 /* Restart the service task */
5449 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5450
5451 return 0;
5452 }
5453 #endif /* CONFIG_PM */
5454
5455 /**
5456 * ice_pci_err_detected - warning that PCI error has been detected
5457 * @pdev: PCI device information struct
5458 * @err: the type of PCI error
5459 *
5460 * Called to warn that something happened on the PCI bus and the error handling
5461 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5462 */
5463 static pci_ers_result_t
5464 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5465 {
5466 struct ice_pf *pf = pci_get_drvdata(pdev);
5467
5468 if (!pf) {
5469 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5470 __func__, err);
5471 return PCI_ERS_RESULT_DISCONNECT;
5472 }
5473
5474 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5475 ice_service_task_stop(pf);
5476
5477 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5478 set_bit(ICE_PFR_REQ, pf->state);
5479 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5480 }
5481 }
5482
5483 return PCI_ERS_RESULT_NEED_RESET;
5484 }
5485
5486 /**
5487 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5488 * @pdev: PCI device information struct
5489 *
5490 * Called to determine if the driver can recover from the PCI slot reset by
5491 * using a register read to determine if the device is recoverable.
5492 */
5493 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5494 {
5495 struct ice_pf *pf = pci_get_drvdata(pdev);
5496 pci_ers_result_t result;
5497 int err;
5498 u32 reg;
5499
5500 err = pci_enable_device_mem(pdev);
5501 if (err) {
5502 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5503 err);
5504 result = PCI_ERS_RESULT_DISCONNECT;
5505 } else {
5506 pci_set_master(pdev);
5507 pci_restore_state(pdev);
5508 pci_save_state(pdev);
5509 pci_wake_from_d3(pdev, false);
5510
5511 /* Check for life */
5512 reg = rd32(&pf->hw, GLGEN_RTRIG);
5513 if (!reg)
5514 result = PCI_ERS_RESULT_RECOVERED;
5515 else
5516 result = PCI_ERS_RESULT_DISCONNECT;
5517 }
5518
5519 return result;
5520 }
5521
5522 /**
5523 * ice_pci_err_resume - restart operations after PCI error recovery
5524 * @pdev: PCI device information struct
5525 *
5526 * Called to allow the driver to bring things back up after PCI error and/or
5527 * reset recovery have finished
5528 */
5529 static void ice_pci_err_resume(struct pci_dev *pdev)
5530 {
5531 struct ice_pf *pf = pci_get_drvdata(pdev);
5532
5533 if (!pf) {
5534 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5535 __func__);
5536 return;
5537 }
5538
5539 if (test_bit(ICE_SUSPENDED, pf->state)) {
5540 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5541 __func__);
5542 return;
5543 }
5544
5545 ice_restore_all_vfs_msi_state(pdev);
5546
5547 ice_do_reset(pf, ICE_RESET_PFR);
5548 ice_service_task_restart(pf);
5549 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5550 }
5551
5552 /**
5553 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5554 * @pdev: PCI device information struct
5555 */
5556 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5557 {
5558 struct ice_pf *pf = pci_get_drvdata(pdev);
5559
5560 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5561 ice_service_task_stop(pf);
5562
5563 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5564 set_bit(ICE_PFR_REQ, pf->state);
5565 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5566 }
5567 }
5568 }
5569
5570 /**
5571 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5572 * @pdev: PCI device information struct
5573 */
5574 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5575 {
5576 ice_pci_err_resume(pdev);
5577 }
5578
5579 /* ice_pci_tbl - PCI Device ID Table
5580 *
5581 * Wildcard entries (PCI_ANY_ID) should come last
5582 * Last entry must be all 0s
5583 *
5584 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5585 * Class, Class Mask, private data (not used) }
5586 */
5587 static const struct pci_device_id ice_pci_tbl[] = {
5588 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5589 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5590 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5591 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5592 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5593 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5594 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5595 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5596 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5597 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5598 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5599 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5600 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5601 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5602 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5603 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5604 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5605 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5606 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5607 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5608 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5609 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5610 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5611 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5612 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5613 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5614 /* required last entry */
5615 { 0, }
5616 };
5617 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5618
5619 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5620
5621 static const struct pci_error_handlers ice_pci_err_handler = {
5622 .error_detected = ice_pci_err_detected,
5623 .slot_reset = ice_pci_err_slot_reset,
5624 .reset_prepare = ice_pci_err_reset_prepare,
5625 .reset_done = ice_pci_err_reset_done,
5626 .resume = ice_pci_err_resume
5627 };
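
/* Illustrative sketch (not driver code): for a recoverable AER event the
 * PCI core walks the handlers above roughly in this order, assuming each
 * step reports success:
 *
 *	ice_pci_err_detected(pdev, state);	// returns PCI_ERS_RESULT_NEED_RESET
 *	ice_pci_err_slot_reset(pdev);		// returns PCI_ERS_RESULT_RECOVERED
 *	ice_pci_err_resume(pdev);		// PF reset, service task restarted
 *
 * reset_prepare()/reset_done() are instead invoked around resets driven
 * through pci_reset_function() and similar interfaces.
 */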
5628
5629 static struct pci_driver ice_driver = {
5630 .name = KBUILD_MODNAME,
5631 .id_table = ice_pci_tbl,
5632 .probe = ice_probe,
5633 .remove = ice_remove,
5634 #ifdef CONFIG_PM
5635 .driver.pm = &ice_pm_ops,
5636 #endif /* CONFIG_PM */
5637 .shutdown = ice_shutdown,
5638 .sriov_configure = ice_sriov_configure,
5639 .err_handler = &ice_pci_err_handler
5640 };
5641
5642 /**
5643 * ice_module_init - Driver registration routine
5644 *
5645 * ice_module_init is the first routine called when the driver is
5646 * loaded. All it does is register with the PCI subsystem.
5647 */
5648 static int __init ice_module_init(void)
5649 {
5650 int status = -ENOMEM;
5651
5652 pr_info("%s\n", ice_driver_string);
5653 pr_info("%s\n", ice_copyright);
5654
5655 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5656 if (!ice_wq) {
5657 pr_err("Failed to create workqueue\n");
5658 return status;
5659 }
5660
5661 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5662 if (!ice_lag_wq) {
5663 pr_err("Failed to create LAG workqueue\n");
5664 goto err_dest_wq;
5665 }
5666
5667 status = pci_register_driver(&ice_driver);
5668 if (status) {
5669 pr_err("failed to register PCI driver, err %d\n", status);
5670 goto err_dest_lag_wq;
5671 }
5672
5673 return 0;
5674
5675 err_dest_lag_wq:
5676 destroy_workqueue(ice_lag_wq);
5677 err_dest_wq:
5678 destroy_workqueue(ice_wq);
5679 return status;
5680 }
5681 module_init(ice_module_init);
5682
5683 /**
5684 * ice_module_exit - Driver exit cleanup routine
5685 *
5686 * ice_module_exit is called just before the driver is removed
5687 * from memory.
5688 */
5689 static void __exit ice_module_exit(void)
5690 {
5691 pci_unregister_driver(&ice_driver);
5692 destroy_workqueue(ice_wq);
5693 destroy_workqueue(ice_lag_wq);
5694 pr_info("module unloaded\n");
5695 }
5696 module_exit(ice_module_exit);
5697
5698 /**
5699 * ice_set_mac_address - NDO callback to set MAC address
5700 * @netdev: network interface device structure
5701 * @pi: pointer to an address structure
5702 *
5703 * Returns 0 on success, negative on failure
5704 */
5705 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5706 {
5707 struct ice_netdev_priv *np = netdev_priv(netdev);
5708 struct ice_vsi *vsi = np->vsi;
5709 struct ice_pf *pf = vsi->back;
5710 struct ice_hw *hw = &pf->hw;
5711 struct sockaddr *addr = pi;
5712 u8 old_mac[ETH_ALEN];
5713 u8 flags = 0;
5714 u8 *mac;
5715 int err;
5716
5717 mac = (u8 *)addr->sa_data;
5718
5719 if (!is_valid_ether_addr(mac))
5720 return -EADDRNOTAVAIL;
5721
5722 if (test_bit(ICE_DOWN, pf->state) ||
5723 ice_is_reset_in_progress(pf->state)) {
5724 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5725 mac);
5726 return -EBUSY;
5727 }
5728
5729 if (ice_chnl_dmac_fltr_cnt(pf)) {
5730 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5731 mac);
5732 return -EAGAIN;
5733 }
5734
5735 netif_addr_lock_bh(netdev);
5736 ether_addr_copy(old_mac, netdev->dev_addr);
5737 /* change the netdev's MAC address */
5738 eth_hw_addr_set(netdev, mac);
5739 netif_addr_unlock_bh(netdev);
5740
5741 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5742 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5743 if (err && err != -ENOENT) {
5744 err = -EADDRNOTAVAIL;
5745 goto err_update_filters;
5746 }
5747
5748 /* Add filter for new MAC. If filter exists, return success */
5749 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5750 if (err == -EEXIST) {
5751 /* Although this MAC filter is already present in hardware it's
5752 * possible in some cases (e.g. bonding) that dev_addr was
5753 * modified outside of the driver and needs to be restored back
5754 * to this value.
5755 */
5756 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5757
5758 return 0;
5759 } else if (err) {
5760 /* error if the new filter addition failed */
5761 err = -EADDRNOTAVAIL;
5762 }
5763
5764 err_update_filters:
5765 if (err) {
5766 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5767 mac);
5768 netif_addr_lock_bh(netdev);
5769 eth_hw_addr_set(netdev, old_mac);
5770 netif_addr_unlock_bh(netdev);
5771 return err;
5772 }
5773
5774 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5775 netdev->dev_addr);
5776
5777 /* write new MAC address to the firmware */
5778 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5779 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5780 if (err) {
5781 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5782 mac, err);
5783 }
5784 return 0;
5785 }
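
/* For illustration only: this NDO is reached through the core
 * dev_set_mac_address() path, e.g. when userspace runs (interface name and
 * address are hypothetical):
 *
 *	ip link set dev eth0 address 00:1b:21:aa:bb:cc
 *
 * The rtnetlink handler packs the new address into a struct sockaddr, which
 * is what arrives here via the @pi argument.
 */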
5786
5787 /**
5788 * ice_set_rx_mode - NDO callback to set the netdev filters
5789 * @netdev: network interface device structure
5790 */
5791 static void ice_set_rx_mode(struct net_device *netdev)
5792 {
5793 struct ice_netdev_priv *np = netdev_priv(netdev);
5794 struct ice_vsi *vsi = np->vsi;
5795
5796 if (!vsi || ice_is_switchdev_running(vsi->back))
5797 return;
5798
5799 /* Set the flags to synchronize filters
5800 * ndo_set_rx_mode may be triggered even without a change in netdev
5801 * flags
5802 */
5803 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5804 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5805 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5806
5807 /* schedule our worker thread which will take care of
5808 * applying the new filter changes
5809 */
5810 ice_service_task_schedule(vsi->back);
5811 }
5812
5813 /**
5814 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5815 * @netdev: network interface device structure
5816 * @queue_index: Queue ID
5817 * @maxrate: maximum bandwidth in Mbps
5818 */
5819 static int
5820 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5821 {
5822 struct ice_netdev_priv *np = netdev_priv(netdev);
5823 struct ice_vsi *vsi = np->vsi;
5824 u16 q_handle;
5825 int status;
5826 u8 tc;
5827
5828 /* Validate maxrate requested is within permitted range */
5829 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5830 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5831 maxrate, queue_index);
5832 return -EINVAL;
5833 }
5834
5835 q_handle = vsi->tx_rings[queue_index]->q_handle;
5836 tc = ice_dcb_get_tc(vsi, queue_index);
5837
5838 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
5839 if (!vsi) {
5840 netdev_err(netdev, "Invalid VSI for given queue %d\n",
5841 queue_index);
5842 return -EINVAL;
5843 }
5844
5845 /* Set BW back to default when the user sets maxrate to 0 */
5846 if (!maxrate)
5847 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5848 q_handle, ICE_MAX_BW);
5849 else
5850 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5851 q_handle, ICE_MAX_BW, maxrate * 1000);
5852 if (status)
5853 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5854 status);
5855
5856 return status;
5857 }
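
/* Worked example (illustrative): the stack passes @maxrate in Mbps while
 * ice_cfg_q_bw_lmt() expects Kbps, hence the "maxrate * 1000" above. So a
 * request of maxrate = 100 programs a 100,000 Kbps (100 Mbps) limit on the
 * queue. Userspace typically reaches this NDO via sysfs, e.g. (interface
 * name hypothetical):
 *
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */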
5858
5859 /**
5860 * ice_fdb_add - add an entry to the hardware database
5861 * @ndm: the input from the stack
5862 * @tb: pointer to array of nladdr (unused)
5863 * @dev: the net device pointer
5864 * @addr: the MAC address entry being added
5865 * @vid: VLAN ID
5866 * @flags: instructions from stack about fdb operation
5867 * @extack: netlink extended ack
5868 */
5869 static int
5870 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5871 struct net_device *dev, const unsigned char *addr, u16 vid,
5872 u16 flags, struct netlink_ext_ack __always_unused *extack)
5873 {
5874 int err;
5875
5876 if (vid) {
5877 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5878 return -EINVAL;
5879 }
5880 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5881 netdev_err(dev, "FDB only supports static addresses\n");
5882 return -EINVAL;
5883 }
5884
5885 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5886 err = dev_uc_add_excl(dev, addr);
5887 else if (is_multicast_ether_addr(addr))
5888 err = dev_mc_add_excl(dev, addr);
5889 else
5890 err = -EINVAL;
5891
5892 /* Only return duplicate errors if NLM_F_EXCL is set */
5893 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5894 err = 0;
5895
5896 return err;
5897 }
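
/* For illustration only: this handler backs the rtnetlink FDB path, e.g.
 * (device name hypothetical):
 *
 *	bridge fdb add 01:00:5e:00:00:42 dev eth0 self permanent
 *
 * Unicast and link-local addresses land in the device's UC list, multicast
 * addresses in the MC list, as selected above.
 */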
5898
5899 /**
5900 * ice_fdb_del - delete an entry from the hardware database
5901 * @ndm: the input from the stack
5902 * @tb: pointer to array of nladdr (unused)
5903 * @dev: the net device pointer
5904 * @addr: the MAC address entry being removed
5905 * @vid: VLAN ID
5906 * @extack: netlink extended ack
5907 */
5908 static int
5909 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5910 struct net_device *dev, const unsigned char *addr,
5911 __always_unused u16 vid, struct netlink_ext_ack *extack)
5912 {
5913 int err;
5914
5915 if (ndm->ndm_state & NUD_PERMANENT) {
5916 netdev_err(dev, "FDB only supports static addresses\n");
5917 return -EINVAL;
5918 }
5919
5920 if (is_unicast_ether_addr(addr))
5921 err = dev_uc_del(dev, addr);
5922 else if (is_multicast_ether_addr(addr))
5923 err = dev_mc_del(dev, addr);
5924 else
5925 err = -EINVAL;
5926
5927 return err;
5928 }
5929
5930 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5931 NETIF_F_HW_VLAN_CTAG_TX | \
5932 NETIF_F_HW_VLAN_STAG_RX | \
5933 NETIF_F_HW_VLAN_STAG_TX)
5934
5935 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5936 NETIF_F_HW_VLAN_STAG_RX)
5937
5938 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5939 NETIF_F_HW_VLAN_STAG_FILTER)
5940
5941 /**
5942 * ice_fix_features - fix the netdev features flags based on device limitations
5943 * @netdev: ptr to the netdev that flags are being fixed on
5944 * @features: features that need to be checked and possibly fixed
5945 *
5946 * Make sure any fixups are made to features in this callback. This spares the
5947 * rest of the driver from having to check for unsupported configurations,
5948 * because that is the responsibility of this callback.
5949 *
5950 * Single VLAN Mode (SVM) Supported Features:
5951 * NETIF_F_HW_VLAN_CTAG_FILTER
5952 * NETIF_F_HW_VLAN_CTAG_RX
5953 * NETIF_F_HW_VLAN_CTAG_TX
5954 *
5955 * Double VLAN Mode (DVM) Supported Features:
5956 * NETIF_F_HW_VLAN_CTAG_FILTER
5957 * NETIF_F_HW_VLAN_CTAG_RX
5958 * NETIF_F_HW_VLAN_CTAG_TX
5959 *
5960 * NETIF_F_HW_VLAN_STAG_FILTER
5961 * NETIF_F_HW_VLAN_STAG_RX
5962 * NETIF_F_HW_VLAN_STAG_TX
5963 *
5964 * Features that need fixing:
5965 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5966 * These are mutually exclusive as the VSI context cannot support multiple
5967 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
5968 * is not done, then default to clearing the requested STAG offload
5969 * settings.
5970 *
5971 * All supported filtering has to be enabled or disabled together. For
5972 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5973 * together. If this is not done, then default to VLAN filtering disabled.
5974 * These are mutually exclusive as there is currently no way to
5975 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5976 * prune rules.
5977 */
5978 static netdev_features_t
5979 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5980 {
5981 struct ice_netdev_priv *np = netdev_priv(netdev);
5982 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5983 bool cur_ctag, cur_stag, req_ctag, req_stag;
5984
5985 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5986 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5987 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5988
5989 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5990 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5991 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5992
5993 if (req_vlan_fltr != cur_vlan_fltr) {
5994 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5995 if (req_ctag && req_stag) {
5996 features |= NETIF_VLAN_FILTERING_FEATURES;
5997 } else if (!req_ctag && !req_stag) {
5998 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5999 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
6000 (!cur_stag && req_stag && !cur_ctag)) {
6001 features |= NETIF_VLAN_FILTERING_FEATURES;
6002 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6003 } else if ((cur_ctag && !req_ctag && cur_stag) ||
6004 (cur_stag && !req_stag && cur_ctag)) {
6005 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6006 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6007 }
6008 } else {
6009 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6010 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6011
6012 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6013 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6014 }
6015 }
6016
6017 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6018 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6019 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6020 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6021 NETIF_F_HW_VLAN_STAG_TX);
6022 }
6023
6024 if (!(netdev->features & NETIF_F_RXFCS) &&
6025 (features & NETIF_F_RXFCS) &&
6026 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6027 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6028 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6029 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6030 }
6031
6032 return features;
6033 }
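
/* Worked example (illustrative): with DVM enabled and both filtering bits
 * currently off, a request that flips only the CTAG bit, e.g. (device name
 * hypothetical):
 *
 *	ethtool -K eth0 rx-vlan-filter on
 *
 * hits the (!cur_ctag && req_ctag && !cur_stag) branch above, so the driver
 * enables both CTAG and STAG filtering and warns that the two can only be
 * toggled together.
 */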
6034
6035 /**
6036 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6037 * @vsi: PF's VSI
6038 * @features: features used to determine VLAN offload settings
6039 *
6040 * First, determine the vlan_ethertype based on the VLAN offload bits in
6041 * features. Then determine if stripping and insertion should be enabled or
6042 * disabled. Finally enable or disable VLAN stripping and insertion.
6043 */
6044 static int
6045 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6046 {
6047 bool enable_stripping = true, enable_insertion = true;
6048 struct ice_vsi_vlan_ops *vlan_ops;
6049 int strip_err = 0, insert_err = 0;
6050 u16 vlan_ethertype = 0;
6051
6052 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6053
6054 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6055 vlan_ethertype = ETH_P_8021AD;
6056 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6057 vlan_ethertype = ETH_P_8021Q;
6058
6059 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6060 enable_stripping = false;
6061 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6062 enable_insertion = false;
6063
6064 if (enable_stripping)
6065 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6066 else
6067 strip_err = vlan_ops->dis_stripping(vsi);
6068
6069 if (enable_insertion)
6070 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6071 else
6072 insert_err = vlan_ops->dis_insertion(vsi);
6073
6074 if (strip_err || insert_err)
6075 return -EIO;
6076
6077 return 0;
6078 }
6079
6080 /**
6081 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6082 * @vsi: PF's VSI
6083 * @features: features used to determine VLAN filtering settings
6084 *
6085 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6086 * features.
6087 */
6088 static int
6089 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6090 {
6091 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6092 int err = 0;
6093
6094 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6095 * if either bit is set
6096 */
6097 if (features &
6098 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6099 err = vlan_ops->ena_rx_filtering(vsi);
6100 else
6101 err = vlan_ops->dis_rx_filtering(vsi);
6102
6103 return err;
6104 }
6105
6106 /**
6107 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6108 * @netdev: ptr to the netdev being adjusted
6109 * @features: the feature set that the stack is suggesting
6110 *
6111 * Only update VLAN settings if the requested_vlan_features are different than
6112 * the current_vlan_features.
6113 */
6114 static int
6115 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6116 {
6117 netdev_features_t current_vlan_features, requested_vlan_features;
6118 struct ice_netdev_priv *np = netdev_priv(netdev);
6119 struct ice_vsi *vsi = np->vsi;
6120 int err;
6121
6122 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6123 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6124 if (current_vlan_features ^ requested_vlan_features) {
6125 if ((features & NETIF_F_RXFCS) &&
6126 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6127 dev_err(ice_pf_to_dev(vsi->back),
6128 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6129 return -EIO;
6130 }
6131
6132 err = ice_set_vlan_offload_features(vsi, features);
6133 if (err)
6134 return err;
6135 }
6136
6137 current_vlan_features = netdev->features &
6138 NETIF_VLAN_FILTERING_FEATURES;
6139 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6140 if (current_vlan_features ^ requested_vlan_features) {
6141 err = ice_set_vlan_filtering_features(vsi, features);
6142 if (err)
6143 return err;
6144 }
6145
6146 return 0;
6147 }
6148
6149 /**
6150 * ice_set_loopback - turn on/off loopback mode on underlying PF
6151 * @vsi: ptr to VSI
6152 * @ena: flag to indicate the on/off setting
6153 */
6154 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6155 {
6156 bool if_running = netif_running(vsi->netdev);
6157 int ret;
6158
6159 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6160 ret = ice_down(vsi);
6161 if (ret) {
6162 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6163 return ret;
6164 }
6165 }
6166 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6167 if (ret)
6168 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6169 if (if_running)
6170 ret = ice_up(vsi);
6171
6172 return ret;
6173 }
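
/* For illustration only: NETIF_F_LOOPBACK corresponds to the "loopback"
 * feature string, so this path is typically exercised with (device name
 * hypothetical):
 *
 *	ethtool -K eth0 loopback on
 *
 * which lands here through ice_set_features() below.
 */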
6174
6175 /**
6176 * ice_set_features - set the netdev feature flags
6177 * @netdev: ptr to the netdev being adjusted
6178 * @features: the feature set that the stack is suggesting
6179 */
6180 static int
6181 ice_set_features(struct net_device *netdev, netdev_features_t features)
6182 {
6183 netdev_features_t changed = netdev->features ^ features;
6184 struct ice_netdev_priv *np = netdev_priv(netdev);
6185 struct ice_vsi *vsi = np->vsi;
6186 struct ice_pf *pf = vsi->back;
6187 int ret = 0;
6188
6189 /* Don't set any netdev advanced features with device in Safe Mode */
6190 if (ice_is_safe_mode(pf)) {
6191 dev_err(ice_pf_to_dev(pf),
6192 "Device is in Safe Mode - not enabling advanced netdev features\n");
6193 return ret;
6194 }
6195
6196 /* Do not change setting during reset */
6197 if (ice_is_reset_in_progress(pf->state)) {
6198 dev_err(ice_pf_to_dev(pf),
6199 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6200 return -EBUSY;
6201 }
6202
6203 /* Multiple features can be changed in one call so keep features in
6204 * separate if/else statements to guarantee each feature is checked
6205 */
6206 if (changed & NETIF_F_RXHASH)
6207 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6208
6209 ret = ice_set_vlan_features(netdev, features);
6210 if (ret)
6211 return ret;
6212
6213 /* Turn on receive of FCS aka CRC, and after setting this
6214 * flag the packet data will have the 4 byte CRC appended
6215 */
6216 if (changed & NETIF_F_RXFCS) {
6217 if ((features & NETIF_F_RXFCS) &&
6218 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6219 dev_err(ice_pf_to_dev(vsi->back),
6220 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6221 return -EIO;
6222 }
6223
6224 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6225 ret = ice_down_up(vsi);
6226 if (ret)
6227 return ret;
6228 }
6229
6230 if (changed & NETIF_F_NTUPLE) {
6231 bool ena = !!(features & NETIF_F_NTUPLE);
6232
6233 ice_vsi_manage_fdir(vsi, ena);
6234 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6235 }
6236
6237 /* don't turn off hw_tc_offload when ADQ is already enabled */
6238 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6239 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6240 return -EACCES;
6241 }
6242
6243 if (changed & NETIF_F_HW_TC) {
6244 bool ena = !!(features & NETIF_F_HW_TC);
6245
6246 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6247 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6248 }
6249
6250 if (changed & NETIF_F_LOOPBACK)
6251 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6252
6253 return ret;
6254 }
6255
6256 /**
6257 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6258 * @vsi: VSI to setup VLAN properties for
6259 */
6260 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6261 {
6262 int err;
6263
6264 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6265 if (err)
6266 return err;
6267
6268 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6269 if (err)
6270 return err;
6271
6272 return ice_vsi_add_vlan_zero(vsi);
6273 }
6274
6275 /**
6276 * ice_vsi_cfg_lan - Setup the VSI lan related config
6277 * @vsi: the VSI being configured
6278 *
6279 * Return 0 on success and negative value on error
6280 */
6281 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6282 {
6283 int err;
6284
6285 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6286 ice_set_rx_mode(vsi->netdev);
6287
6288 err = ice_vsi_vlan_setup(vsi);
6289 if (err)
6290 return err;
6291 }
6292 ice_vsi_cfg_dcb_rings(vsi);
6293
6294 err = ice_vsi_cfg_lan_txqs(vsi);
6295 if (!err && ice_is_xdp_ena_vsi(vsi))
6296 err = ice_vsi_cfg_xdp_txqs(vsi);
6297 if (!err)
6298 err = ice_vsi_cfg_rxqs(vsi);
6299
6300 return err;
6301 }
6302
6303 /* THEORY OF MODERATION:
6304 * The ice driver hardware works differently than the hardware that DIMLIB was
6305 * originally made for. ice hardware doesn't have packet count limits that
6306 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6307 * which is hard-coded to a limit of 250,000 ints/second.
6308 * If not using dynamic moderation, the INTRL value can be modified
6309 * by ethtool rx-usecs-high.
6310 */
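
/* Worked example (illustrative): an ITR value is the minimum gap between
 * interrupts in microseconds, so the worst-case rate is 10^6 / itr. An ITR
 * of 8 us allows at most 1,000,000 / 8 = 125,000 ints/s; an ITR of 2 us
 * would allow 500,000 ints/s, which the hard-coded INTRL then caps at
 * 250,000 ints/s.
 */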
6311 struct ice_dim {
6312 /* the throttle rate for interrupts, basically the worst-case delay before
6313 * an initial interrupt fires; the value is stored in microseconds.
6314 */
6315 u16 itr;
6316 };
6317
6318 /* Make a different profile for Rx that doesn't allow quite so aggressive
6319 * moderation at the high end (it maxes out at 126 us, or about 8k interrupts
6320 * per second).
6321 */
6322 static const struct ice_dim rx_profile[] = {
6323 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6324 {8}, /* 125,000 ints/s */
6325 {16}, /* 62,500 ints/s */
6326 {62}, /* 16,129 ints/s */
6327 {126} /* 7,936 ints/s */
6328 };
6329
6330 /* The transmit profile, which has the same sorts of values
6331 * as the Rx profile above.
6332 */
6333 static const struct ice_dim tx_profile[] = {
6334 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6335 {8}, /* 125,000 ints/s */
6336 {40}, /* 25,000 ints/s */
6337 {128}, /* 7,812 ints/s */
6338 {256} /* 3,906 ints/s */
6339 };
6340
6341 static void ice_tx_dim_work(struct work_struct *work)
6342 {
6343 struct ice_ring_container *rc;
6344 struct dim *dim;
6345 u16 itr;
6346
6347 dim = container_of(work, struct dim, work);
6348 rc = dim->priv;
6349
6350 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6351
6352 /* look up the values in our local table */
6353 itr = tx_profile[dim->profile_ix].itr;
6354
6355 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6356 ice_write_itr(rc, itr);
6357
6358 dim->state = DIM_START_MEASURE;
6359 }
6360
6361 static void ice_rx_dim_work(struct work_struct *work)
6362 {
6363 struct ice_ring_container *rc;
6364 struct dim *dim;
6365 u16 itr;
6366
6367 dim = container_of(work, struct dim, work);
6368 rc = dim->priv;
6369
6370 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6371
6372 /* look up the values in our local table */
6373 itr = rx_profile[dim->profile_ix].itr;
6374
6375 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6376 ice_write_itr(rc, itr);
6377
6378 dim->state = DIM_START_MEASURE;
6379 }
6380
6381 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6382
6383 /**
6384 * ice_init_moderation - set up interrupt moderation
6385 * @q_vector: the vector containing rings to be configured
6386 *
6387 * Set up interrupt moderation registers, with the intent to do the right thing
6388 * when called from reset or from probe, whether or not dynamic moderation is
6389 * enabled. Take special care to write all the registers whether dynamic
6390 * moderation is in use or not, in order to make sure hardware is in a known
6391 * state.
6392 */
6393 static void ice_init_moderation(struct ice_q_vector *q_vector)
6394 {
6395 struct ice_ring_container *rc;
6396 bool tx_dynamic, rx_dynamic;
6397
6398 rc = &q_vector->tx;
6399 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6400 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6401 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6402 rc->dim.priv = rc;
6403 tx_dynamic = ITR_IS_DYNAMIC(rc);
6404
6405 /* set the initial TX ITR to match the above */
6406 ice_write_itr(rc, tx_dynamic ?
6407 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6408
6409 rc = &q_vector->rx;
6410 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6411 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6412 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6413 rc->dim.priv = rc;
6414 rx_dynamic = ITR_IS_DYNAMIC(rc);
6415
6416 /* set the initial RX ITR to match the above */
6417 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6418 rc->itr_setting);
6419
6420 ice_set_q_vector_intrl(q_vector);
6421 }
6422
6423 /**
6424 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6425 * @vsi: the VSI being configured
6426 */
6427 static void ice_napi_enable_all(struct ice_vsi *vsi)
6428 {
6429 int q_idx;
6430
6431 if (!vsi->netdev)
6432 return;
6433
6434 ice_for_each_q_vector(vsi, q_idx) {
6435 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6436
6437 ice_init_moderation(q_vector);
6438
6439 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6440 napi_enable(&q_vector->napi);
6441 }
6442 }
6443
6444 /**
6445 * ice_up_complete - Finish the last steps of bringing up a connection
6446 * @vsi: The VSI being configured
6447 *
6448 * Return 0 on success and negative value on error
6449 */
6450 static int ice_up_complete(struct ice_vsi *vsi)
6451 {
6452 struct ice_pf *pf = vsi->back;
6453 int err;
6454
6455 ice_vsi_cfg_msix(vsi);
6456
6457 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6458 * Tx queue group list was configured and the context bits were
6459 * programmed using ice_vsi_cfg_txqs
6460 */
6461 err = ice_vsi_start_all_rx_rings(vsi);
6462 if (err)
6463 return err;
6464
6465 clear_bit(ICE_VSI_DOWN, vsi->state);
6466 ice_napi_enable_all(vsi);
6467 ice_vsi_ena_irq(vsi);
6468
6469 if (vsi->port_info &&
6470 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6471 vsi->netdev && vsi->type == ICE_VSI_PF) {
6472 ice_print_link_msg(vsi, true);
6473 netif_tx_start_all_queues(vsi->netdev);
6474 netif_carrier_on(vsi->netdev);
6475 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6476 }
6477
6478 /* Perform an initial read of the statistics registers now to
6479 * set the baseline so counters are ready when interface is up
6480 */
6481 ice_update_eth_stats(vsi);
6482
6483 if (vsi->type == ICE_VSI_PF)
6484 ice_service_task_schedule(pf);
6485
6486 return 0;
6487 }
6488
6489 /**
6490 * ice_up - Bring the connection back up after being down
6491 * @vsi: VSI being configured
6492 */
6493 int ice_up(struct ice_vsi *vsi)
6494 {
6495 int err;
6496
6497 err = ice_vsi_cfg_lan(vsi);
6498 if (!err)
6499 err = ice_up_complete(vsi);
6500
6501 return err;
6502 }
6503
6504 /**
6505 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6506 * @syncp: pointer to u64_stats_sync
6507 * @stats: stats that pkts and bytes count will be taken from
6508 * @pkts: packets stats counter
6509 * @bytes: bytes stats counter
6510 *
6511 * This function fetches stats from the ring, taking care of the atomic
6512 * operations needed to read u64 values correctly on 32-bit machines.
6513 */
6514 void
6515 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6516 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6517 {
6518 unsigned int start;
6519
6520 do {
6521 start = u64_stats_fetch_begin(syncp);
6522 *pkts = stats.pkts;
6523 *bytes = stats.bytes;
6524 } while (u64_stats_fetch_retry(syncp, start));
6525 }
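
/* Note (illustrative): on 64-bit kernels u64_stats_fetch_begin() and
 * u64_stats_fetch_retry() compile down to no-ops, since 64-bit loads are
 * atomic there; the retry loop only does real work on 32-bit kernels, where
 * a writer could otherwise tear a u64 update in half.
 */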
6526
6527 /**
6528 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6529 * @vsi: the VSI to be updated
6530 * @vsi_stats: the stats struct to be updated
6531 * @rings: rings to work on
6532 * @count: number of rings
6533 */
6534 static void
6535 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6536 struct rtnl_link_stats64 *vsi_stats,
6537 struct ice_tx_ring **rings, u16 count)
6538 {
6539 u16 i;
6540
6541 for (i = 0; i < count; i++) {
6542 struct ice_tx_ring *ring;
6543 u64 pkts = 0, bytes = 0;
6544
6545 ring = READ_ONCE(rings[i]);
6546 if (!ring || !ring->ring_stats)
6547 continue;
6548 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6549 ring->ring_stats->stats, &pkts,
6550 &bytes);
6551 vsi_stats->tx_packets += pkts;
6552 vsi_stats->tx_bytes += bytes;
6553 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6554 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6555 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6556 }
6557 }
6558
6559 /**
6560 * ice_update_vsi_ring_stats - Update VSI stats counters
6561 * @vsi: the VSI to be updated
6562 */
6563 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6564 {
6565 struct rtnl_link_stats64 *net_stats, *stats_prev;
6566 struct rtnl_link_stats64 *vsi_stats;
6567 struct ice_pf *pf = vsi->back;
6568 u64 pkts, bytes;
6569 int i;
6570
6571 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6572 if (!vsi_stats)
6573 return;
6574
6575 /* reset non-netdev (extended) stats */
6576 vsi->tx_restart = 0;
6577 vsi->tx_busy = 0;
6578 vsi->tx_linearize = 0;
6579 vsi->rx_buf_failed = 0;
6580 vsi->rx_page_failed = 0;
6581
6582 rcu_read_lock();
6583
6584 /* update Tx rings counters */
6585 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6586 vsi->num_txq);
6587
6588 /* update Rx rings counters */
6589 ice_for_each_rxq(vsi, i) {
6590 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6591 struct ice_ring_stats *ring_stats;
6592
6593 ring_stats = ring->ring_stats;
6594 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6595 ring_stats->stats, &pkts,
6596 &bytes);
6597 vsi_stats->rx_packets += pkts;
6598 vsi_stats->rx_bytes += bytes;
6599 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6600 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6601 }
6602
6603 /* update XDP Tx rings counters */
6604 if (ice_is_xdp_ena_vsi(vsi))
6605 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6606 vsi->num_xdp_txq);
6607
6608 rcu_read_unlock();
6609
6610 net_stats = &vsi->net_stats;
6611 stats_prev = &vsi->net_stats_prev;
6612
6613 /* Update netdev counters, but keep in mind that the ring counters can
6614 * restart at a random value after a PF reset. Since we advance the
6615 * reported stats by the delta (Cur - Prev), we must be sure that Prev is
6616 * valid; if it is not, skip this round.
6617 */
6618 if (likely(pf->stat_prev_loaded)) {
6619 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6620 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6621 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6622 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6623 }
6624
6625 stats_prev->tx_packets = vsi_stats->tx_packets;
6626 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6627 stats_prev->rx_packets = vsi_stats->rx_packets;
6628 stats_prev->rx_bytes = vsi_stats->rx_bytes;
6629
6630 kfree(vsi_stats);
6631 }
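
/* Worked example (illustrative): suppose tx_packets was 1000 on the
 * previous pass (Prev) and the rings now report 1010 (Cur). With
 * stat_prev_loaded set, the netdev counter advances by Cur - Prev = 10.
 * After a PF reset the rings can restart from arbitrary values, so one pass
 * only records the new baseline instead of adding a bogus delta.
 */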
6632
6633 /**
6634 * ice_update_vsi_stats - Update VSI stats counters
6635 * @vsi: the VSI to be updated
6636 */
6637 void ice_update_vsi_stats(struct ice_vsi *vsi)
6638 {
6639 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6640 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6641 struct ice_pf *pf = vsi->back;
6642
6643 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6644 test_bit(ICE_CFG_BUSY, pf->state))
6645 return;
6646
6647 /* get stats as recorded by Tx/Rx rings */
6648 ice_update_vsi_ring_stats(vsi);
6649
6650 /* get VSI stats as recorded by the hardware */
6651 ice_update_eth_stats(vsi);
6652
6653 cur_ns->tx_errors = cur_es->tx_errors;
6654 cur_ns->rx_dropped = cur_es->rx_discards;
6655 cur_ns->tx_dropped = cur_es->tx_discards;
6656 cur_ns->multicast = cur_es->rx_multicast;
6657
6658 /* update some more netdev stats if this is main VSI */
6659 if (vsi->type == ICE_VSI_PF) {
6660 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6661 cur_ns->rx_errors = pf->stats.crc_errors +
6662 pf->stats.illegal_bytes +
6663 pf->stats.rx_len_errors +
6664 pf->stats.rx_undersize +
6665 pf->hw_csum_rx_error +
6666 pf->stats.rx_jabber +
6667 pf->stats.rx_fragments +
6668 pf->stats.rx_oversize;
6669 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6670 /* record drops from the port level */
6671 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6672 }
6673 }
6674
6675 /**
6676 * ice_update_pf_stats - Update PF port stats counters
6677 * @pf: PF whose stats need to be updated
6678 */
6679 void ice_update_pf_stats(struct ice_pf *pf)
6680 {
6681 struct ice_hw_port_stats *prev_ps, *cur_ps;
6682 struct ice_hw *hw = &pf->hw;
6683 u16 fd_ctr_base;
6684 u8 port;
6685
6686 port = hw->port_info->lport;
6687 prev_ps = &pf->stats_prev;
6688 cur_ps = &pf->stats;
6689
6690 if (ice_is_reset_in_progress(pf->state))
6691 pf->stat_prev_loaded = false;
6692
6693 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6694 &prev_ps->eth.rx_bytes,
6695 &cur_ps->eth.rx_bytes);
6696
6697 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6698 &prev_ps->eth.rx_unicast,
6699 &cur_ps->eth.rx_unicast);
6700
6701 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6702 &prev_ps->eth.rx_multicast,
6703 &cur_ps->eth.rx_multicast);
6704
6705 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6706 &prev_ps->eth.rx_broadcast,
6707 &cur_ps->eth.rx_broadcast);
6708
6709 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6710 &prev_ps->eth.rx_discards,
6711 &cur_ps->eth.rx_discards);
6712
6713 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6714 &prev_ps->eth.tx_bytes,
6715 &cur_ps->eth.tx_bytes);
6716
6717 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6718 &prev_ps->eth.tx_unicast,
6719 &cur_ps->eth.tx_unicast);
6720
6721 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6722 &prev_ps->eth.tx_multicast,
6723 &cur_ps->eth.tx_multicast);
6724
6725 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6726 &prev_ps->eth.tx_broadcast,
6727 &cur_ps->eth.tx_broadcast);
6728
6729 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6730 &prev_ps->tx_dropped_link_down,
6731 &cur_ps->tx_dropped_link_down);
6732
6733 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6734 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6735
6736 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6737 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6738
6739 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6740 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6741
6742 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6743 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6744
6745 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6746 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6747
6748 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6749 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6750
6751 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6752 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6753
6754 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6755 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6756
6757 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6758 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6759
6760 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6761 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6762
6763 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6764 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6765
6766 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6767 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6768
6769 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6770 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6771
6772 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6773 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6774
6775 fd_ctr_base = hw->fd_ctr_base;
6776
6777 ice_stat_update40(hw,
6778 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6779 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6780 &cur_ps->fd_sb_match);
6781 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6782 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6783
6784 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6785 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6786
6787 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6788 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6789
6790 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6791 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6792
6793 ice_update_dcb_stats(pf);
6794
6795 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6796 &prev_ps->crc_errors, &cur_ps->crc_errors);
6797
6798 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6799 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6800
6801 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6802 &prev_ps->mac_local_faults,
6803 &cur_ps->mac_local_faults);
6804
6805 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6806 &prev_ps->mac_remote_faults,
6807 &cur_ps->mac_remote_faults);
6808
6809 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6810 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6811
6812 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6813 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6814
6815 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6816 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6817
6818 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6819 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6820
6821 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6822 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6823
6824 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6825
6826 pf->stat_prev_loaded = true;
6827 }
6828
6829 /**
6830 * ice_get_stats64 - get statistics for network device structure
6831 * @netdev: network interface device structure
6832 * @stats: main device statistics structure
6833 */
6834 static
6835 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6836 {
6837 struct ice_netdev_priv *np = netdev_priv(netdev);
6838 struct rtnl_link_stats64 *vsi_stats;
6839 struct ice_vsi *vsi = np->vsi;
6840
6841 vsi_stats = &vsi->net_stats;
6842
6843 if (!vsi->num_txq || !vsi->num_rxq)
6844 return;
6845
6846 /* netdev packet/byte stats come from the ring counters. These are
6847 * obtained by summing up the per-ring counters (done by
6848 * ice_update_vsi_ring_stats). Only call the update routine and read the
6849 * registers if the VSI is not down.
6850 */
6851 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6852 ice_update_vsi_ring_stats(vsi);
6853 stats->tx_packets = vsi_stats->tx_packets;
6854 stats->tx_bytes = vsi_stats->tx_bytes;
6855 stats->rx_packets = vsi_stats->rx_packets;
6856 stats->rx_bytes = vsi_stats->rx_bytes;
6857
6858 /* The rest of the stats can be read from the hardware but instead we
6859 * just return values that the watchdog task has already obtained from
6860 * the hardware.
6861 */
6862 stats->multicast = vsi_stats->multicast;
6863 stats->tx_errors = vsi_stats->tx_errors;
6864 stats->tx_dropped = vsi_stats->tx_dropped;
6865 stats->rx_errors = vsi_stats->rx_errors;
6866 stats->rx_dropped = vsi_stats->rx_dropped;
6867 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6868 stats->rx_length_errors = vsi_stats->rx_length_errors;
6869 }
6870
6871 /**
6872 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6873 * @vsi: VSI having NAPI disabled
6874 */
6875 static void ice_napi_disable_all(struct ice_vsi *vsi)
6876 {
6877 int q_idx;
6878
6879 if (!vsi->netdev)
6880 return;
6881
6882 ice_for_each_q_vector(vsi, q_idx) {
6883 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6884
6885 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6886 napi_disable(&q_vector->napi);
6887
6888 cancel_work_sync(&q_vector->tx.dim.work);
6889 cancel_work_sync(&q_vector->rx.dim.work);
6890 }
6891 }
6892
6893 /**
6894 * ice_down - Shutdown the connection
6895 * @vsi: The VSI being stopped
6896 *
6897 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
6898 */
6899 int ice_down(struct ice_vsi *vsi)
6900 {
6901 int i, tx_err, rx_err, vlan_err = 0;
6902
6903 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6904
6905 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6906 vlan_err = ice_vsi_del_vlan_zero(vsi);
6907 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6908 netif_carrier_off(vsi->netdev);
6909 netif_tx_disable(vsi->netdev);
6910 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6911 ice_eswitch_stop_all_tx_queues(vsi->back);
6912 }
6913
6914 ice_vsi_dis_irq(vsi);
6915
6916 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6917 if (tx_err)
6918 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6919 vsi->vsi_num, tx_err);
6920 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6921 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6922 if (tx_err)
6923 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6924 vsi->vsi_num, tx_err);
6925 }
6926
6927 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6928 if (rx_err)
6929 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6930 vsi->vsi_num, rx_err);
6931
6932 ice_napi_disable_all(vsi);
6933
6934 ice_for_each_txq(vsi, i)
6935 ice_clean_tx_ring(vsi->tx_rings[i]);
6936
6937 if (ice_is_xdp_ena_vsi(vsi))
6938 ice_for_each_xdp_txq(vsi, i)
6939 ice_clean_tx_ring(vsi->xdp_rings[i]);
6940
6941 ice_for_each_rxq(vsi, i)
6942 ice_clean_rx_ring(vsi->rx_rings[i]);
6943
6944 if (tx_err || rx_err || vlan_err) {
6945 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6946 vsi->vsi_num, vsi->vsw->sw_id);
6947 return -EIO;
6948 }
6949
6950 return 0;
6951 }
6952
6953 /**
6954 * ice_down_up - shutdown the VSI connection and bring it up
6955 * @vsi: the VSI to be reconnected
6956 */
6957 int ice_down_up(struct ice_vsi *vsi)
6958 {
6959 int ret;
6960
6961 /* if DOWN already set, nothing to do */
6962 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6963 return 0;
6964
6965 ret = ice_down(vsi);
6966 if (ret)
6967 return ret;
6968
6969 ret = ice_up(vsi);
6970 if (ret) {
6971 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
6972 return ret;
6973 }
6974
6975 return 0;
6976 }
6977
6978 /**
6979 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6980 * @vsi: VSI having resources allocated
6981 *
6982 * Return 0 on success, negative on failure
6983 */
6984 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6985 {
6986 int i, err = 0;
6987
6988 if (!vsi->num_txq) {
6989 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6990 vsi->vsi_num);
6991 return -EINVAL;
6992 }
6993
6994 ice_for_each_txq(vsi, i) {
6995 struct ice_tx_ring *ring = vsi->tx_rings[i];
6996
6997 if (!ring)
6998 return -EINVAL;
6999
7000 if (vsi->netdev)
7001 ring->netdev = vsi->netdev;
7002 err = ice_setup_tx_ring(ring);
7003 if (err)
7004 break;
7005 }
7006
7007 return err;
7008 }
7009
7010 /**
7011 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7012 * @vsi: VSI having resources allocated
7013 *
7014 * Return 0 on success, negative on failure
7015 */
7016 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7017 {
7018 int i, err = 0;
7019
7020 if (!vsi->num_rxq) {
7021 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7022 vsi->vsi_num);
7023 return -EINVAL;
7024 }
7025
7026 ice_for_each_rxq(vsi, i) {
7027 struct ice_rx_ring *ring = vsi->rx_rings[i];
7028
7029 if (!ring)
7030 return -EINVAL;
7031
7032 if (vsi->netdev)
7033 ring->netdev = vsi->netdev;
7034 err = ice_setup_rx_ring(ring);
7035 if (err)
7036 break;
7037 }
7038
7039 return err;
7040 }
7041
7042 /**
7043 * ice_vsi_open_ctrl - open control VSI for use
7044 * @vsi: the VSI to open
7045 *
7046 * Initialization of the Control VSI
7047 *
7048 * Returns 0 on success, negative value on error
7049 */
7050 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7051 {
7052 char int_name[ICE_INT_NAME_STR_LEN];
7053 struct ice_pf *pf = vsi->back;
7054 struct device *dev;
7055 int err;
7056
7057 dev = ice_pf_to_dev(pf);
7058 /* allocate descriptors */
7059 err = ice_vsi_setup_tx_rings(vsi);
7060 if (err)
7061 goto err_setup_tx;
7062
7063 err = ice_vsi_setup_rx_rings(vsi);
7064 if (err)
7065 goto err_setup_rx;
7066
7067 err = ice_vsi_cfg_lan(vsi);
7068 if (err)
7069 goto err_setup_rx;
7070
7071 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7072 dev_driver_string(dev), dev_name(dev));
7073 err = ice_vsi_req_irq_msix(vsi, int_name);
7074 if (err)
7075 goto err_setup_rx;
7076
7077 ice_vsi_cfg_msix(vsi);
7078
7079 err = ice_vsi_start_all_rx_rings(vsi);
7080 if (err)
7081 goto err_up_complete;
7082
7083 clear_bit(ICE_VSI_DOWN, vsi->state);
7084 ice_vsi_ena_irq(vsi);
7085
7086 return 0;
7087
7088 err_up_complete:
7089 ice_down(vsi);
7090 err_setup_rx:
7091 ice_vsi_free_rx_rings(vsi);
7092 err_setup_tx:
7093 ice_vsi_free_tx_rings(vsi);
7094
7095 return err;
7096 }
7097
7098 /**
7099 * ice_vsi_open - Called when a network interface is made active
7100 * @vsi: the VSI to open
7101 *
7102 * Initialization of the VSI
7103 *
7104 * Returns 0 on success, negative value on error
7105 */
7106 int ice_vsi_open(struct ice_vsi *vsi)
7107 {
7108 char int_name[ICE_INT_NAME_STR_LEN];
7109 struct ice_pf *pf = vsi->back;
7110 int err;
7111
7112 /* allocate descriptors */
7113 err = ice_vsi_setup_tx_rings(vsi);
7114 if (err)
7115 goto err_setup_tx;
7116
7117 err = ice_vsi_setup_rx_rings(vsi);
7118 if (err)
7119 goto err_setup_rx;
7120
7121 err = ice_vsi_cfg_lan(vsi);
7122 if (err)
7123 goto err_setup_rx;
7124
7125 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7126 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7127 err = ice_vsi_req_irq_msix(vsi, int_name);
7128 if (err)
7129 goto err_setup_rx;
7130
7131 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7132
7133 if (vsi->type == ICE_VSI_PF) {
7134 /* Notify the stack of the actual queue counts. */
7135 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7136 if (err)
7137 goto err_set_qs;
7138
7139 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7140 if (err)
7141 goto err_set_qs;
7142 }
7143
7144 err = ice_up_complete(vsi);
7145 if (err)
7146 goto err_up_complete;
7147
7148 return 0;
7149
7150 err_up_complete:
7151 ice_down(vsi);
7152 err_set_qs:
7153 ice_vsi_free_irq(vsi);
7154 err_setup_rx:
7155 ice_vsi_free_rx_rings(vsi);
7156 err_setup_tx:
7157 ice_vsi_free_tx_rings(vsi);
7158
7159 return err;
7160 }
7161
7162 /**
7163 * ice_vsi_release_all - Delete all VSIs
7164 * @pf: PF from which all VSIs are being removed
7165 */
7166 static void ice_vsi_release_all(struct ice_pf *pf)
7167 {
7168 int err, i;
7169
7170 if (!pf->vsi)
7171 return;
7172
7173 ice_for_each_vsi(pf, i) {
7174 if (!pf->vsi[i])
7175 continue;
7176
7177 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7178 continue;
7179
7180 err = ice_vsi_release(pf->vsi[i]);
7181 if (err)
7182 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7183 i, err, pf->vsi[i]->vsi_num);
7184 }
7185 }
7186
7187 /**
7188 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7189 * @pf: pointer to the PF instance
7190 * @type: VSI type to rebuild
7191 *
7192 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7193 */
7194 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7195 {
7196 struct device *dev = ice_pf_to_dev(pf);
7197 int i, err;
7198
7199 ice_for_each_vsi(pf, i) {
7200 struct ice_vsi *vsi = pf->vsi[i];
7201
7202 if (!vsi || vsi->type != type)
7203 continue;
7204
7205 /* rebuild the VSI */
7206 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7207 if (err) {
7208 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7209 err, vsi->idx, ice_vsi_type_str(type));
7210 return err;
7211 }
7212
7213 /* replay filters for the VSI */
7214 err = ice_replay_vsi(&pf->hw, vsi->idx);
7215 if (err) {
7216 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7217 err, vsi->idx, ice_vsi_type_str(type));
7218 return err;
7219 }
7220
7221 /* Re-map HW VSI number, using VSI handle that has been
7222 * previously validated in ice_replay_vsi() call above
7223 */
7224 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7225
7226 /* enable the VSI */
7227 err = ice_ena_vsi(vsi, false);
7228 if (err) {
7229 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7230 err, vsi->idx, ice_vsi_type_str(type));
7231 return err;
7232 }
7233
7234 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7235 ice_vsi_type_str(type));
7236 }
7237
7238 return 0;
7239 }
7240
7241 /**
7242 * ice_update_pf_netdev_link - Update PF netdev link status
7243 * @pf: pointer to the PF instance
7244 */
7245 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7246 {
7247 bool link_up;
7248 int i;
7249
7250 ice_for_each_vsi(pf, i) {
7251 struct ice_vsi *vsi = pf->vsi[i];
7252
7253 if (!vsi || vsi->type != ICE_VSI_PF)
7254 return;
7255
7256 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7257 if (link_up) {
7258 netif_carrier_on(pf->vsi[i]->netdev);
7259 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7260 } else {
7261 netif_carrier_off(pf->vsi[i]->netdev);
7262 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7263 }
7264 }
7265 }
7266
7267 /**
7268 * ice_rebuild - rebuild after reset
7269 * @pf: PF to rebuild
7270 * @reset_type: type of reset
7271 *
7272 * Do not rebuild VF VSI in this flow because that is already handled via
7273 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
7274 * PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we don't want
7275 * to reset/rebuild all the VF VSI twice.
7276 */
7277 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7278 {
7279 struct device *dev = ice_pf_to_dev(pf);
7280 struct ice_hw *hw = &pf->hw;
7281 bool dvm;
7282 int err;
7283
7284 if (test_bit(ICE_DOWN, pf->state))
7285 goto clear_recovery;
7286
7287 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7288
7289 #define ICE_EMP_RESET_SLEEP_MS 5000
7290 if (reset_type == ICE_RESET_EMPR) {
7291 /* If an EMP reset has occurred, any previously pending flash
7292 * update will have completed. We no longer know whether or
7293 * not the NVM update EMP reset is restricted.
7294 */
7295 pf->fw_emp_reset_disabled = false;
7296
7297 msleep(ICE_EMP_RESET_SLEEP_MS);
7298 }
7299
7300 err = ice_init_all_ctrlq(hw);
7301 if (err) {
7302 dev_err(dev, "control queues init failed %d\n", err);
7303 goto err_init_ctrlq;
7304 }
7305
7306 /* if DDP was previously loaded successfully */
7307 if (!ice_is_safe_mode(pf)) {
7308 /* reload the SW DB of filter tables */
7309 if (reset_type == ICE_RESET_PFR)
7310 ice_fill_blk_tbls(hw);
7311 else
7312 /* Reload DDP Package after CORER/GLOBR reset */
7313 ice_load_pkg(NULL, pf);
7314 }
7315
7316 err = ice_clear_pf_cfg(hw);
7317 if (err) {
7318 dev_err(dev, "clear PF configuration failed %d\n", err);
7319 goto err_init_ctrlq;
7320 }
7321
7322 ice_clear_pxe_mode(hw);
7323
7324 err = ice_init_nvm(hw);
7325 if (err) {
7326 dev_err(dev, "ice_init_nvm failed %d\n", err);
7327 goto err_init_ctrlq;
7328 }
7329
7330 err = ice_get_caps(hw);
7331 if (err) {
7332 dev_err(dev, "ice_get_caps failed %d\n", err);
7333 goto err_init_ctrlq;
7334 }
7335
7336 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7337 if (err) {
7338 dev_err(dev, "set_mac_cfg failed %d\n", err);
7339 goto err_init_ctrlq;
7340 }
7341
7342 dvm = ice_is_dvm_ena(hw);
7343
7344 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7345 if (err)
7346 goto err_init_ctrlq;
7347
7348 err = ice_sched_init_port(hw->port_info);
7349 if (err)
7350 goto err_sched_init_port;
7351
7352 /* start misc vector */
7353 err = ice_req_irq_msix_misc(pf);
7354 if (err) {
7355 dev_err(dev, "misc vector setup failed: %d\n", err);
7356 goto err_sched_init_port;
7357 }
7358
7359 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7360 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7361 if (!rd32(hw, PFQF_FD_SIZE)) {
7362 u16 unused, guar, b_effort;
7363
7364 guar = hw->func_caps.fd_fltr_guar;
7365 b_effort = hw->func_caps.fd_fltr_best_effort;
7366
7367 /* force guaranteed filter pool for PF */
7368 ice_alloc_fd_guar_item(hw, &unused, guar);
7369 /* force shared filter pool for PF */
7370 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7371 }
7372 }
7373
7374 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7375 ice_dcb_rebuild(pf);
7376
7377 /* If the PF previously had PTP enabled, PTP init needs to happen
7378 * before the VSI rebuild; otherwise the PTP link status events
7379 * fail.
7380 */
7381 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7382 ice_ptp_reset(pf);
7383
7384 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7385 ice_gnss_init(pf);
7386
7387 /* rebuild PF VSI */
7388 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7389 if (err) {
7390 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7391 goto err_vsi_rebuild;
7392 }
7393
7394 /* configure PTP timestamping after VSI rebuild */
7395 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7396 ice_ptp_cfg_timestamp(pf, false);
7397
7398 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7399 if (err) {
7400 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7401 goto err_vsi_rebuild;
7402 }
7403
7404 if (reset_type == ICE_RESET_PFR) {
7405 err = ice_rebuild_channels(pf);
7406 if (err) {
7407 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7408 err);
7409 goto err_vsi_rebuild;
7410 }
7411 }
7412
7413 /* If Flow Director is active */
7414 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7415 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7416 if (err) {
7417 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7418 goto err_vsi_rebuild;
7419 }
7420
7421 /* replay HW Flow Director recipes */
7422 if (hw->fdir_prof)
7423 ice_fdir_replay_flows(hw);
7424
7425 /* replay Flow Director filters */
7426 ice_fdir_replay_fltrs(pf);
7427
7428 ice_rebuild_arfs(pf);
7429 }
7430
7431 ice_update_pf_netdev_link(pf);
7432
7433 /* tell the firmware we are up */
7434 err = ice_send_version(pf);
7435 if (err) {
7436 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7437 err);
7438 goto err_vsi_rebuild;
7439 }
7440
7441 ice_replay_post(hw);
7442
7443 /* if we get here, reset flow is successful */
7444 clear_bit(ICE_RESET_FAILED, pf->state);
7445
7446 ice_plug_aux_dev(pf);
7447 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7448 ice_lag_rebuild(pf);
7449 return;
7450
7451 err_vsi_rebuild:
7452 err_sched_init_port:
7453 ice_sched_cleanup_all(hw);
7454 err_init_ctrlq:
7455 ice_shutdown_all_ctrlq(hw);
7456 set_bit(ICE_RESET_FAILED, pf->state);
7457 clear_recovery:
7458 /* set this bit in PF state to control service task scheduling */
7459 set_bit(ICE_NEEDS_RESTART, pf->state);
7460 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7461 }
7462
7463 /**
7464 * ice_change_mtu - NDO callback to change the MTU
7465 * @netdev: network interface device structure
7466 * @new_mtu: new value for maximum frame size
7467 *
7468 * Returns 0 on success, negative on failure
7469 */
7470 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7471 {
7472 struct ice_netdev_priv *np = netdev_priv(netdev);
7473 struct ice_vsi *vsi = np->vsi;
7474 struct ice_pf *pf = vsi->back;
7475 struct bpf_prog *prog;
7476 u8 count = 0;
7477 int err = 0;
7478
7479 if (new_mtu == (int)netdev->mtu) {
7480 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7481 return 0;
7482 }
7483
7484 prog = vsi->xdp_prog;
7485 if (prog && !prog->aux->xdp_has_frags) {
7486 int frame_size = ice_max_xdp_frame_size(vsi);
7487
7488 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7489 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7490 frame_size - ICE_ETH_PKT_HDR_PAD);
7491 return -EINVAL;
7492 }
7493 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7494 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7495 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7496 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7497 return -EINVAL;
7498 }
7499 }
7500
7501 /* if a reset is in progress, wait up to ~100-200 ms for it to complete */
7502 do {
7503 if (ice_is_reset_in_progress(pf->state)) {
7504 count++;
7505 usleep_range(1000, 2000);
7506 } else {
7507 break;
7508 }
7509
7510 } while (count < 100);
7511
7512 if (count == 100) {
7513 netdev_err(netdev, "can't change MTU. Device is busy\n");
7514 return -EBUSY;
7515 }
7516
7517 netdev->mtu = (unsigned int)new_mtu;
7518 err = ice_down_up(vsi);
7519 if (err)
7520 return err;
7521
7522 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7523 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7524
7525 return err;
7526 }
7527
7528 /**
7529 * ice_eth_ioctl - Access the hwtstamp interface
7530 * @netdev: network interface device structure
7531 * @ifr: interface request data
7532 * @cmd: ioctl command
7533 */
7534 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7535 {
7536 struct ice_netdev_priv *np = netdev_priv(netdev);
7537 struct ice_pf *pf = np->vsi->back;
7538
7539 switch (cmd) {
7540 case SIOCGHWTSTAMP:
7541 return ice_ptp_get_ts_config(pf, ifr);
7542 case SIOCSHWTSTAMP:
7543 return ice_ptp_set_ts_config(pf, ifr);
7544 default:
7545 return -EOPNOTSUPP;
7546 }
7547 }
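
/* These ioctls are normally driven by userspace PTP tooling: for example,
 * ptp4l or hwstamp_ctl from linuxptp issue SIOCSHWTSTAMP with a populated
 * struct hwtstamp_config, and SIOCGHWTSTAMP reads the active config back.
 */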
7548
7549 /**
7550 * ice_aq_str - convert AQ err code to a string
7551 * @aq_err: the AQ error code to convert
7552 */
7553 const char *ice_aq_str(enum ice_aq_err aq_err)
7554 {
7555 switch (aq_err) {
7556 case ICE_AQ_RC_OK:
7557 return "OK";
7558 case ICE_AQ_RC_EPERM:
7559 return "ICE_AQ_RC_EPERM";
7560 case ICE_AQ_RC_ENOENT:
7561 return "ICE_AQ_RC_ENOENT";
7562 case ICE_AQ_RC_ENOMEM:
7563 return "ICE_AQ_RC_ENOMEM";
7564 case ICE_AQ_RC_EBUSY:
7565 return "ICE_AQ_RC_EBUSY";
7566 case ICE_AQ_RC_EEXIST:
7567 return "ICE_AQ_RC_EEXIST";
7568 case ICE_AQ_RC_EINVAL:
7569 return "ICE_AQ_RC_EINVAL";
7570 case ICE_AQ_RC_ENOSPC:
7571 return "ICE_AQ_RC_ENOSPC";
7572 case ICE_AQ_RC_ENOSYS:
7573 return "ICE_AQ_RC_ENOSYS";
7574 case ICE_AQ_RC_EMODE:
7575 return "ICE_AQ_RC_EMODE";
7576 case ICE_AQ_RC_ENOSEC:
7577 return "ICE_AQ_RC_ENOSEC";
7578 case ICE_AQ_RC_EBADSIG:
7579 return "ICE_AQ_RC_EBADSIG";
7580 case ICE_AQ_RC_ESVN:
7581 return "ICE_AQ_RC_ESVN";
7582 case ICE_AQ_RC_EBADMAN:
7583 return "ICE_AQ_RC_EBADMAN";
7584 case ICE_AQ_RC_EBADBUF:
7585 return "ICE_AQ_RC_EBADBUF";
7586 }
7587
7588 return "ICE_AQ_RC_UNKNOWN";
7589 }
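
/* Typical call pattern, as used throughout this file: pair the numeric
 * status with the decoded admin queue error string, e.g.
 *
 *	dev_err(dev, "operation failed, err %d aq_err %s\n",
 *		status, ice_aq_str(hw->adminq.sq_last_status));
 */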
7590
7591 /**
7592 * ice_set_rss_lut - Set RSS LUT
7593 * @vsi: Pointer to VSI structure
7594 * @lut: Lookup table
7595 * @lut_size: Lookup table size
7596 *
7597 * Returns 0 on success, negative on failure
7598 */
7599 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7600 {
7601 struct ice_aq_get_set_rss_lut_params params = {};
7602 struct ice_hw *hw = &vsi->back->hw;
7603 int status;
7604
7605 if (!lut)
7606 return -EINVAL;
7607
7608 params.vsi_handle = vsi->idx;
7609 params.lut_size = lut_size;
7610 params.lut_type = vsi->rss_lut_type;
7611 params.lut = lut;
7612
7613 status = ice_aq_set_rss_lut(hw, &params);
7614 if (status)
7615 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7616 status, ice_aq_str(hw->adminq.sq_last_status));
7617
7618 return status;
7619 }
7620
7621 /**
7622 * ice_set_rss_key - Set RSS key
7623 * @vsi: Pointer to the VSI structure
7624 * @seed: RSS hash seed
7625 *
7626 * Returns 0 on success, negative on failure
7627 */
7628 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7629 {
7630 struct ice_hw *hw = &vsi->back->hw;
7631 int status;
7632
7633 if (!seed)
7634 return -EINVAL;
7635
7636 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7637 if (status)
7638 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7639 status, ice_aq_str(hw->adminq.sq_last_status));
7640
7641 return status;
7642 }
7643
7644 /**
7645 * ice_get_rss_lut - Get RSS LUT
7646 * @vsi: Pointer to VSI structure
7647 * @lut: Buffer to store the lookup table entries
7648 * @lut_size: Size of buffer to store the lookup table entries
7649 *
7650 * Returns 0 on success, negative on failure
7651 */
7652 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7653 {
7654 struct ice_aq_get_set_rss_lut_params params = {};
7655 struct ice_hw *hw = &vsi->back->hw;
7656 int status;
7657
7658 if (!lut)
7659 return -EINVAL;
7660
7661 params.vsi_handle = vsi->idx;
7662 params.lut_size = lut_size;
7663 params.lut_type = vsi->rss_lut_type;
7664 params.lut = lut;
7665
7666 status = ice_aq_get_rss_lut(hw, &params);
7667 if (status)
7668 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7669 status, ice_aq_str(hw->adminq.sq_last_status));
7670
7671 return status;
7672 }
7673
7674 /**
7675 * ice_get_rss_key - Get RSS key
7676 * @vsi: Pointer to VSI structure
7677 * @seed: Buffer to store the key in
7678 *
7679 * Returns 0 on success, negative on failure
7680 */
7681 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7682 {
7683 struct ice_hw *hw = &vsi->back->hw;
7684 int status;
7685
7686 if (!seed)
7687 return -EINVAL;
7688
7689 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7690 if (status)
7691 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7692 status, ice_aq_str(hw->adminq.sq_last_status));
7693
7694 return status;
7695 }
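
/* Minimal usage sketch for the RSS helpers above (illustrative only; error
 * handling elided, and ICE_VSIQF_HLUT_ARRAY_SIZE is assumed to match the
 * VSI LUT type): read the LUT, adjust it, then write it back.
 *
 *	u8 lut[ICE_VSIQF_HLUT_ARRAY_SIZE];
 *
 *	if (!ice_get_rss_lut(vsi, lut, sizeof(lut))) {
 *		... adjust lut entries ...
 *		ice_set_rss_lut(vsi, lut, sizeof(lut));
 *	}
 */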
7696
7697 /**
7698 * ice_bridge_getlink - Get the hardware bridge mode
7699 * @skb: skb buff
7700 * @pid: process ID
7701 * @seq: RTNL message seq
7702 * @dev: the netdev being configured
7703 * @filter_mask: filter mask passed in
7704 * @nlflags: netlink flags passed in
7705 *
7706 * Return the bridge mode (VEB/VEPA)
7707 */
7708 static int
7709 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7710 struct net_device *dev, u32 filter_mask, int nlflags)
7711 {
7712 struct ice_netdev_priv *np = netdev_priv(dev);
7713 struct ice_vsi *vsi = np->vsi;
7714 struct ice_pf *pf = vsi->back;
7715 u16 bmode;
7716
7717 bmode = pf->first_sw->bridge_mode;
7718
7719 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7720 filter_mask, NULL);
7721 }
7722
7723 /**
7724 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7725 * @vsi: Pointer to VSI structure
7726 * @bmode: Hardware bridge mode (VEB/VEPA)
7727 *
7728 * Returns 0 on success, negative on failure
7729 */
7730 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7731 {
7732 struct ice_aqc_vsi_props *vsi_props;
7733 struct ice_hw *hw = &vsi->back->hw;
7734 struct ice_vsi_ctx *ctxt;
7735 int ret;
7736
7737 vsi_props = &vsi->info;
7738
7739 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7740 if (!ctxt)
7741 return -ENOMEM;
7742
7743 ctxt->info = vsi->info;
7744
7745 if (bmode == BRIDGE_MODE_VEB)
7746 /* change from VEPA to VEB mode */
7747 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7748 else
7749 /* change from VEB to VEPA mode */
7750 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7751 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7752
7753 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7754 if (ret) {
7755 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7756 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7757 goto out;
7758 }
7759 /* Update sw flags for bookkeeping */
7760 vsi_props->sw_flags = ctxt->info.sw_flags;
7761
7762 out:
7763 kfree(ctxt);
7764 return ret;
7765 }
7766
7767 /**
7768 * ice_bridge_setlink - Set the hardware bridge mode
7769 * @dev: the netdev being configured
7770 * @nlh: RTNL message
7771 * @flags: bridge setlink flags
7772 * @extack: netlink extended ack
7773 *
7774 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7775 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
7776 * not already set) for all VSIs connected to this switch, and also updates the
7777 * unicast switch filter rules for the corresponding switch of the netdev.
7778 */
7779 static int
7780 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7781 u16 __always_unused flags,
7782 struct netlink_ext_ack __always_unused *extack)
7783 {
7784 struct ice_netdev_priv *np = netdev_priv(dev);
7785 struct ice_pf *pf = np->vsi->back;
7786 struct nlattr *attr, *br_spec;
7787 struct ice_hw *hw = &pf->hw;
7788 struct ice_sw *pf_sw;
7789 int rem, v, err = 0;
7790
7791 pf_sw = pf->first_sw;
7792 /* find the attribute in the netlink message */
7793 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7794 if (!br_spec)
7795 return -EINVAL;
7796
7797 nla_for_each_nested(attr, br_spec, rem) {
7798 __u16 mode;
7799
7800 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7801 continue;
7802 mode = nla_get_u16(attr);
7803 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7804 return -EINVAL;
7805 /* Continue if bridge mode is not being flipped */
7806 if (mode == pf_sw->bridge_mode)
7807 continue;
7808 /* Iterates through the PF VSI list and update the loopback
7809 * mode of the VSI
7810 */
7811 ice_for_each_vsi(pf, v) {
7812 if (!pf->vsi[v])
7813 continue;
7814 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7815 if (err)
7816 return err;
7817 }
7818
7819 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7820 /* Update the unicast switch filter rules for the corresponding
7821 * switch of the netdev
7822 */
7823 err = ice_update_sw_rule_bridge_mode(hw);
7824 if (err) {
7825 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7826 mode, err,
7827 ice_aq_str(hw->adminq.sq_last_status));
7828 /* revert hw->evb_veb */
7829 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7830 return err;
7831 }
7832
7833 pf_sw->bridge_mode = mode;
7834 }
7835
7836 return 0;
7837 }
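
/* From userspace this path is typically exercised via iproute2, e.g. with a
 * hypothetical interface name:
 *
 *	bridge link set dev eth0 hwmode vepa
 *
 * The current mode can be read back through ice_bridge_getlink() above with
 * "bridge link show dev eth0".
 */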
7838
7839 /**
7840 * ice_tx_timeout - Respond to a Tx Hang
7841 * @netdev: network interface device structure
7842 * @txqueue: Tx queue
7843 */
7844 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7845 {
7846 struct ice_netdev_priv *np = netdev_priv(netdev);
7847 struct ice_tx_ring *tx_ring = NULL;
7848 struct ice_vsi *vsi = np->vsi;
7849 struct ice_pf *pf = vsi->back;
7850 u32 i;
7851
7852 pf->tx_timeout_count++;
7853
7854 /* Check if PFC is enabled for the TC to which the queue belongs.
7855 * If yes, the Tx timeout is not caused by a hung queue and there
7856 * is no need to reset and rebuild
7857 */
7858 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7859 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7860 txqueue);
7861 return;
7862 }
7863
7864 /* now that we have an index, find the tx_ring struct */
7865 ice_for_each_txq(vsi, i)
7866 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7867 if (txqueue == vsi->tx_rings[i]->q_index) {
7868 tx_ring = vsi->tx_rings[i];
7869 break;
7870 }
7871
7872 /* Reset recovery level if enough time has elapsed after last timeout.
7873 * Also ensure no new reset action happens before next timeout period.
7874 */
7875 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7876 pf->tx_timeout_recovery_level = 1;
7877 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7878 netdev->watchdog_timeo)))
7879 return;
7880
7881 if (tx_ring) {
7882 struct ice_hw *hw = &pf->hw;
7883 u32 head, val = 0;
7884
7885 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7886 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7887 /* Read interrupt register */
7888 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7889
7890 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7891 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7892 head, tx_ring->next_to_use, val);
7893 }
7894
7895 pf->tx_timeout_last_recovery = jiffies;
7896 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7897 pf->tx_timeout_recovery_level, txqueue);
7898
7899 switch (pf->tx_timeout_recovery_level) {
7900 case 1:
7901 set_bit(ICE_PFR_REQ, pf->state);
7902 break;
7903 case 2:
7904 set_bit(ICE_CORER_REQ, pf->state);
7905 break;
7906 case 3:
7907 set_bit(ICE_GLOBR_REQ, pf->state);
7908 break;
7909 default:
7910 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7911 set_bit(ICE_DOWN, pf->state);
7912 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7913 set_bit(ICE_SERVICE_DIS, pf->state);
7914 break;
7915 }
7916
7917 ice_service_task_schedule(pf);
7918 pf->tx_timeout_recovery_level++;
7919 }
7920
7921 /**
7922 * ice_setup_tc_cls_flower - flower classifier offloads
7923 * @np: net device to configure
7924 * @filter_dev: device on which filter is added
7925 * @cls_flower: offload data
7926 */
7927 static int
7928 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7929 struct net_device *filter_dev,
7930 struct flow_cls_offload *cls_flower)
7931 {
7932 struct ice_vsi *vsi = np->vsi;
7933
7934 if (cls_flower->common.chain_index)
7935 return -EOPNOTSUPP;
7936
7937 switch (cls_flower->command) {
7938 case FLOW_CLS_REPLACE:
7939 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7940 case FLOW_CLS_DESTROY:
7941 return ice_del_cls_flower(vsi, cls_flower);
7942 default:
7943 return -EINVAL;
7944 }
7945 }
7946
7947 /**
7948 * ice_setup_tc_block_cb - callback handler registered for TC block
7949 * @type: TC SETUP type
7950 * @type_data: TC flower offload data that contains user input
7951 * @cb_priv: netdev private data
7952 */
7953 static int
7954 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7955 {
7956 struct ice_netdev_priv *np = cb_priv;
7957
7958 switch (type) {
7959 case TC_SETUP_CLSFLOWER:
7960 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7961 type_data);
7962 default:
7963 return -EOPNOTSUPP;
7964 }
7965 }
7966
7967 /**
7968 * ice_validate_mqprio_qopt - Validate TCF input parameters
7969 * @vsi: Pointer to VSI
7970 * @mqprio_qopt: input parameters for mqprio queue configuration
7971 *
7972 * This function validates the MQPRIO params, such as qcount (a power of 2
7973 * wherever needed), and makes sure the user doesn't specify a qcount or BW
7974 * rate limit for more TCs than "num_tc"
7975 */
7976 static int
7977 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7978 struct tc_mqprio_qopt_offload *mqprio_qopt)
7979 {
7980 int non_power_of_2_qcount = 0;
7981 struct ice_pf *pf = vsi->back;
7982 int max_rss_q_cnt = 0;
7983 u64 sum_min_rate = 0;
7984 struct device *dev;
7985 int i, speed;
7986 u8 num_tc;
7987
7988 if (vsi->type != ICE_VSI_PF)
7989 return -EINVAL;
7990
7991 if (mqprio_qopt->qopt.offset[0] != 0 ||
7992 mqprio_qopt->qopt.num_tc < 1 ||
7993 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7994 return -EINVAL;
7995
7996 dev = ice_pf_to_dev(pf);
7997 vsi->ch_rss_size = 0;
7998 num_tc = mqprio_qopt->qopt.num_tc;
7999 speed = ice_get_link_speed_kbps(vsi);
8000
8001 for (i = 0; num_tc; i++) {
8002 int qcount = mqprio_qopt->qopt.count[i];
8003 u64 max_rate, min_rate, rem;
8004
8005 if (!qcount)
8006 return -EINVAL;
8007
8008 if (is_power_of_2(qcount)) {
8009 if (non_power_of_2_qcount &&
8010 qcount > non_power_of_2_qcount) {
8011 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8012 qcount, non_power_of_2_qcount);
8013 return -EINVAL;
8014 }
8015 if (qcount > max_rss_q_cnt)
8016 max_rss_q_cnt = qcount;
8017 } else {
8018 if (non_power_of_2_qcount &&
8019 qcount != non_power_of_2_qcount) {
8020 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8021 qcount, non_power_of_2_qcount);
8022 return -EINVAL;
8023 }
8024 if (qcount < max_rss_q_cnt) {
8025 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8026 qcount, max_rss_q_cnt);
8027 return -EINVAL;
8028 }
8029 max_rss_q_cnt = qcount;
8030 non_power_of_2_qcount = qcount;
8031 }
8032
8033 /* The TC command takes input in K/M/Gbps or K/M/Gbit etc., but
8034 * converts the bandwidth rate limit into Bytes/s when
8035 * passing it down to the driver. So convert the input bandwidth
8036 * from Bytes/s to Kbps
8037 */
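/* Worked example, assuming ICE_BW_KBPS_DIVISOR is 125 (1 Kbps ==
 * 125 Bytes/s): a 100 Mbit request arrives from the stack as
 * 12,500,000 Bytes/s, and div_u64(12500000, 125) yields the
 * expected 100,000 Kbps.
 */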
8038 max_rate = mqprio_qopt->max_rate[i];
8039 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8040
8041 /* min_rate is minimum guaranteed rate and it can't be zero */
8042 min_rate = mqprio_qopt->min_rate[i];
8043 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8044 sum_min_rate += min_rate;
8045
8046 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8047 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8048 min_rate, ICE_MIN_BW_LIMIT);
8049 return -EINVAL;
8050 }
8051
8052 if (max_rate && max_rate > speed) {
8053 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8054 i, max_rate, speed);
8055 return -EINVAL;
8056 }
8057
8058 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8059 if (rem) {
8060 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8061 i, ICE_MIN_BW_LIMIT);
8062 return -EINVAL;
8063 }
8064
8065 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8066 if (rem) {
8067 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8068 i, ICE_MIN_BW_LIMIT);
8069 return -EINVAL;
8070 }
8071
8072 /* min_rate can't be more than max_rate, except when max_rate
8073 * is zero (implies max_rate sought is max line rate). In such
8074 * a case min_rate can be more than max.
8075 */
8076 if (max_rate && min_rate > max_rate) {
8077 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8078 min_rate, max_rate);
8079 return -EINVAL;
8080 }
8081
8082 if (i >= mqprio_qopt->qopt.num_tc - 1)
8083 break;
8084 if (mqprio_qopt->qopt.offset[i + 1] !=
8085 (mqprio_qopt->qopt.offset[i] + qcount))
8086 return -EINVAL;
8087 }
8088 if (vsi->num_rxq <
8089 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8090 return -EINVAL;
8091 if (vsi->num_txq <
8092 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8093 return -EINVAL;
8094
8095 if (sum_min_rate && sum_min_rate > (u64)speed) {
8096 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8097 sum_min_rate, speed);
8098 return -EINVAL;
8099 }
8100
8101 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8102 vsi->ch_rss_size = max_rss_q_cnt;
8103
8104 return 0;
8105 }
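
/* Worked example of the qcount rules above (hypothetical values): with
 * num_tc = 3, count = {4, 4, 6} and offset = {0, 4, 8}, TC2's qcount of 6 is
 * the single permitted non-power-of-2 value and is not smaller than any
 * power-of-2 qcount, so validation passes and ch_rss_size becomes 6. By
 * contrast, count = {4, 6, 8} is rejected because the power-of-2 qcount 8
 * exceeds the non-power-of-2 qcount 6.
 */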
8106
8107 /**
8108 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8109 * @pf: ptr to PF device
8110 * @vsi: ptr to VSI
8111 */
8112 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8113 {
8114 struct device *dev = ice_pf_to_dev(pf);
8115 bool added = false;
8116 struct ice_hw *hw;
8117 int flow;
8118
8119 if (!(vsi->num_gfltr || vsi->num_bfltr))
8120 return -EINVAL;
8121
8122 hw = &pf->hw;
8123 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8124 struct ice_fd_hw_prof *prof;
8125 int tun, status;
8126 u64 entry_h;
8127
8128 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8129 hw->fdir_prof[flow]->cnt))
8130 continue;
8131
8132 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8133 enum ice_flow_priority prio;
8134 u64 prof_id;
8135
8136 /* add this VSI to FDir profile for this flow */
8137 prio = ICE_FLOW_PRIO_NORMAL;
8138 prof = hw->fdir_prof[flow];
8139 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8140 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8141 prof->vsi_h[0], vsi->idx,
8142 prio, prof->fdir_seg[tun],
8143 &entry_h);
8144 if (status) {
8145 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8146 vsi->idx, flow);
8147 continue;
8148 }
8149
8150 prof->entry_h[prof->cnt][tun] = entry_h;
8151 }
8152
8153 /* store VSI for filter replay and delete */
8154 prof->vsi_h[prof->cnt] = vsi->idx;
8155 prof->cnt++;
8156
8157 added = true;
8158 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8159 flow);
8160 }
8161
8162 if (!added)
8163 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8164
8165 return 0;
8166 }
8167
8168 /**
8169 * ice_add_channel - add a channel by adding VSI
8170 * @pf: ptr to PF device
8171 * @sw_id: underlying HW switching element ID
8172 * @ch: ptr to channel structure
8173 *
8174 * Add a channel (VSI) using add_vsi and queue_map
8175 */
8176 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8177 {
8178 struct device *dev = ice_pf_to_dev(pf);
8179 struct ice_vsi *vsi;
8180
8181 if (ch->type != ICE_VSI_CHNL) {
8182 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8183 return -EINVAL;
8184 }
8185
8186 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8187 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8188 dev_err(dev, "create chnl VSI failure\n");
8189 return -EINVAL;
8190 }
8191
8192 ice_add_vsi_to_fdir(pf, vsi);
8193
8194 ch->sw_id = sw_id;
8195 ch->vsi_num = vsi->vsi_num;
8196 ch->info.mapping_flags = vsi->info.mapping_flags;
8197 ch->ch_vsi = vsi;
8198 /* set the back pointer of channel for newly created VSI */
8199 vsi->ch = ch;
8200
8201 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8202 sizeof(vsi->info.q_mapping));
8203 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8204 sizeof(vsi->info.tc_mapping));
8205
8206 return 0;
8207 }
8208
8209 /**
8210 * ice_chnl_cfg_res - configure channel specific resources
8211 * @vsi: the VSI being setup
8212 * @ch: ptr to channel structure
8213 *
8214 * Configure channel specific resources such as rings, vector.
8215 */
8216 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8217 {
8218 int i;
8219
8220 for (i = 0; i < ch->num_txq; i++) {
8221 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8222 struct ice_ring_container *rc;
8223 struct ice_tx_ring *tx_ring;
8224 struct ice_rx_ring *rx_ring;
8225
8226 tx_ring = vsi->tx_rings[ch->base_q + i];
8227 rx_ring = vsi->rx_rings[ch->base_q + i];
8228 if (!tx_ring || !rx_ring)
8229 continue;
8230
8231 /* setup ring being channel enabled */
8232 tx_ring->ch = ch;
8233 rx_ring->ch = ch;
8234
8235 /* following code block sets up vector specific attributes */
8236 tx_q_vector = tx_ring->q_vector;
8237 rx_q_vector = rx_ring->q_vector;
8238 if (!tx_q_vector && !rx_q_vector)
8239 continue;
8240
8241 if (tx_q_vector) {
8242 tx_q_vector->ch = ch;
8243 /* setup Tx and Rx ITR setting if DIM is off */
8244 rc = &tx_q_vector->tx;
8245 if (!ITR_IS_DYNAMIC(rc))
8246 ice_write_itr(rc, rc->itr_setting);
8247 }
8248 if (rx_q_vector) {
8249 rx_q_vector->ch = ch;
8250 /* setup Tx and Rx ITR setting if DIM is off */
8251 rc = &rx_q_vector->rx;
8252 if (!ITR_IS_DYNAMIC(rc))
8253 ice_write_itr(rc, rc->itr_setting);
8254 }
8255 }
8256
8257 /* It is safe to assume that, if the channel has a non-zero num_txq or
8258 * num_rxq, the GLINT_ITR register will have been written to perform an
8259 * in-context update; hence perform a flush
8260 */
8261 if (ch->num_txq || ch->num_rxq)
8262 ice_flush(&vsi->back->hw);
8263 }
8264
8265 /**
8266 * ice_cfg_chnl_all_res - configure channel resources
8267 * @vsi: ptr to the main VSI
8268 * @ch: ptr to channel structure
8269 *
8270 * This function configures channel specific resources such as flow-director
8271 * counter index, and other resources such as queues, vectors, ITR settings
8272 */
8273 static void
8274 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8275 {
8276 /* configure channel (aka ADQ) resources such as queues, vectors,
8277 * ITR settings for channel specific vectors and anything else
8278 */
8279 ice_chnl_cfg_res(vsi, ch);
8280 }
8281
8282 /**
8283 * ice_setup_hw_channel - setup new channel
8284 * @pf: ptr to PF device
8285 * @vsi: the VSI being setup
8286 * @ch: ptr to channel structure
8287 * @sw_id: underlying HW switching element ID
8288 * @type: type of channel to be created (VMDq2/VF)
8289 *
8290 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8291 * and configures Tx rings accordingly
8292 */
8293 static int
8294 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8295 struct ice_channel *ch, u16 sw_id, u8 type)
8296 {
8297 struct device *dev = ice_pf_to_dev(pf);
8298 int ret;
8299
8300 ch->base_q = vsi->next_base_q;
8301 ch->type = type;
8302
8303 ret = ice_add_channel(pf, sw_id, ch);
8304 if (ret) {
8305 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8306 return ret;
8307 }
8308
8309 /* configure/setup ADQ specific resources */
8310 ice_cfg_chnl_all_res(vsi, ch);
8311
8312 /* make sure to update the next_base_q so that subsequent channels'
8313 * (aka ADQ) VSI queue maps are correct
8314 */
8315 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8316 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8317 ch->num_rxq);
8318
8319 return 0;
8320 }
8321
8322 /**
8323 * ice_setup_channel - setup new channel using uplink element
8324 * @pf: ptr to PF device
8325 * @vsi: the VSI being setup
8326 * @ch: ptr to channel structure
8327 *
8328 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8329 * and uplink switching element
8330 */
8331 static bool
8332 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8333 struct ice_channel *ch)
8334 {
8335 struct device *dev = ice_pf_to_dev(pf);
8336 u16 sw_id;
8337 int ret;
8338
8339 if (vsi->type != ICE_VSI_PF) {
8340 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8341 return false;
8342 }
8343
8344 sw_id = pf->first_sw->sw_id;
8345
8346 /* create channel (VSI) */
8347 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8348 if (ret) {
8349 dev_err(dev, "failed to setup hw_channel\n");
8350 return false;
8351 }
8352 dev_dbg(dev, "successfully created channel()\n");
8353
8354 return ch->ch_vsi ? true : false;
8355 }
8356
8357 /**
8358 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8359 * @vsi: VSI to be configured
8360 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8361 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8362 */
8363 static int
8364 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8365 {
8366 int err;
8367
8368 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8369 if (err)
8370 return err;
8371
8372 return ice_set_max_bw_limit(vsi, max_tx_rate);
8373 }
8374
8375 /**
8376 * ice_create_q_channel - function to create channel
8377 * @vsi: VSI to be configured
8378 * @ch: ptr to channel (it contains channel specific params)
8379 *
8380 * This function creates channel (VSI) using num_queues specified by user,
8381 * reconfigs RSS if needed.
8382 */
8383 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8384 {
8385 struct ice_pf *pf = vsi->back;
8386 struct device *dev;
8387
8388 if (!ch)
8389 return -EINVAL;
8390
8391 dev = ice_pf_to_dev(pf);
8392 if (!ch->num_txq || !ch->num_rxq) {
8393 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8394 return -EINVAL;
8395 }
8396
8397 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8398 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8399 vsi->cnt_q_avail, ch->num_txq);
8400 return -EINVAL;
8401 }
8402
8403 if (!ice_setup_channel(pf, vsi, ch)) {
8404 dev_info(dev, "Failed to setup channel\n");
8405 return -EINVAL;
8406 }
8407 /* configure BW rate limit */
8408 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8409 int ret;
8410
8411 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8412 ch->min_tx_rate);
8413 if (ret)
8414 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8415 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8416 else
8417 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8418 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8419 }
8420
8421 vsi->cnt_q_avail -= ch->num_txq;
8422
8423 return 0;
8424 }
8425
8426 /**
8427 * ice_rem_all_chnl_fltrs - removes all channel filters
8428 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8429 *
8430 * Remove all advanced switch filters only if they are channel specific
8431 * tc-flower based filter
8432 */
8433 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8434 {
8435 struct ice_tc_flower_fltr *fltr;
8436 struct hlist_node *node;
8437
8438 /* to remove all channel filters, iterate an ordered list of filters */
8439 hlist_for_each_entry_safe(fltr, node,
8440 &pf->tc_flower_fltr_list,
8441 tc_flower_node) {
8442 struct ice_rule_query_data rule;
8443 int status;
8444
8445 /* for now process only channel specific filters */
8446 if (!ice_is_chnl_fltr(fltr))
8447 continue;
8448
8449 rule.rid = fltr->rid;
8450 rule.rule_id = fltr->rule_id;
8451 rule.vsi_handle = fltr->dest_vsi_handle;
8452 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8453 if (status) {
8454 if (status == -ENOENT)
8455 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8456 rule.rule_id);
8457 else
8458 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8459 status);
8460 } else if (fltr->dest_vsi) {
8461 /* update advanced switch filter count */
8462 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8463 u32 flags = fltr->flags;
8464
8465 fltr->dest_vsi->num_chnl_fltr--;
8466 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8467 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8468 pf->num_dmac_chnl_fltrs--;
8469 }
8470 }
8471
8472 hlist_del(&fltr->tc_flower_node);
8473 kfree(fltr);
8474 }
8475 }
8476
8477 /**
8478 * ice_remove_q_channels - Remove queue channels for the TCs
8479 * @vsi: VSI to be configured
8480 * @rem_fltr: delete advanced switch filter or not
8481 *
8482 * Remove queue channels for the TCs
8483 */
8484 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8485 {
8486 struct ice_channel *ch, *ch_tmp;
8487 struct ice_pf *pf = vsi->back;
8488 int i;
8489
8490 /* remove all tc-flower based filters if they are channel filters only */
8491 if (rem_fltr)
8492 ice_rem_all_chnl_fltrs(pf);
8493
8494 /* remove ntuple filters since queue configuration is being changed */
8495 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8496 struct ice_hw *hw = &pf->hw;
8497
8498 mutex_lock(&hw->fdir_fltr_lock);
8499 ice_fdir_del_all_fltrs(vsi);
8500 mutex_unlock(&hw->fdir_fltr_lock);
8501 }
8502
8503 /* perform cleanup for channels if they exist */
8504 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8505 struct ice_vsi *ch_vsi;
8506
8507 list_del(&ch->list);
8508 ch_vsi = ch->ch_vsi;
8509 if (!ch_vsi) {
8510 kfree(ch);
8511 continue;
8512 }
8513
8514 /* Reset queue contexts */
8515 for (i = 0; i < ch->num_rxq; i++) {
8516 struct ice_tx_ring *tx_ring;
8517 struct ice_rx_ring *rx_ring;
8518
8519 tx_ring = vsi->tx_rings[ch->base_q + i];
8520 rx_ring = vsi->rx_rings[ch->base_q + i];
8521 if (tx_ring) {
8522 tx_ring->ch = NULL;
8523 if (tx_ring->q_vector)
8524 tx_ring->q_vector->ch = NULL;
8525 }
8526 if (rx_ring) {
8527 rx_ring->ch = NULL;
8528 if (rx_ring->q_vector)
8529 rx_ring->q_vector->ch = NULL;
8530 }
8531 }
8532
8533 /* Release FD resources for the channel VSI */
8534 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8535
8536 /* clear the VSI from scheduler tree */
8537 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8538
8539 /* Delete VSI from FW, PF and HW VSI arrays */
8540 ice_vsi_delete(ch->ch_vsi);
8541
8542 /* free the channel */
8543 kfree(ch);
8544 }
8545
8546 /* clear the channel VSI map which is stored in main VSI */
8547 ice_for_each_chnl_tc(i)
8548 vsi->tc_map_vsi[i] = NULL;
8549
8550 /* reset main VSI's all TC information */
8551 vsi->all_enatc = 0;
8552 vsi->all_numtc = 0;
8553 }
8554
8555 /**
8556 * ice_rebuild_channels - rebuild channels
8557 * @pf: ptr to PF
8558 *
8559 * Recreate channel VSIs and replay filters
8560 */
8561 static int ice_rebuild_channels(struct ice_pf *pf)
8562 {
8563 struct device *dev = ice_pf_to_dev(pf);
8564 struct ice_vsi *main_vsi;
8565 bool rem_adv_fltr = true;
8566 struct ice_channel *ch;
8567 struct ice_vsi *vsi;
8568 int tc_idx = 1;
8569 int i, err;
8570
8571 main_vsi = ice_get_main_vsi(pf);
8572 if (!main_vsi)
8573 return 0;
8574
8575 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8576 main_vsi->old_numtc == 1)
8577 return 0; /* nothing to be done */
8578
8579 /* reconfigure main VSI based on old value of TC and cached values
8580 * for MQPRIO opts
8581 */
8582 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8583 if (err) {
8584 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8585 main_vsi->old_ena_tc, main_vsi->vsi_num);
8586 return err;
8587 }
8588
8589 /* rebuild ADQ VSIs */
8590 ice_for_each_vsi(pf, i) {
8591 enum ice_vsi_type type;
8592
8593 vsi = pf->vsi[i];
8594 if (!vsi || vsi->type != ICE_VSI_CHNL)
8595 continue;
8596
8597 type = vsi->type;
8598
8599 /* rebuild ADQ VSI */
8600 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8601 if (err) {
8602 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8603 ice_vsi_type_str(type), vsi->idx, err);
8604 goto cleanup;
8605 }
8606
8607 /* Re-map HW VSI number, using the VSI handle that is validated
8608 * in the ice_replay_vsi() call below
8609 */
8610 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8611
8612 /* replay filters for the VSI */
8613 err = ice_replay_vsi(&pf->hw, vsi->idx);
8614 if (err) {
8615 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8616 ice_vsi_type_str(type), err, vsi->idx);
8617 rem_adv_fltr = false;
8618 goto cleanup;
8619 }
8620 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8621 ice_vsi_type_str(type), vsi->idx);
8622
8623 /* store ADQ VSI at correct TC index in main VSI's
8624 * map of TC to VSI
8625 */
8626 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8627 }
8628
8629 /* ADQ VSI(s) have been rebuilt successfully, so set up the
8630 * channel(s) for the main VSI's Tx and Rx rings
8631 */
8632 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8633 struct ice_vsi *ch_vsi;
8634
8635 ch_vsi = ch->ch_vsi;
8636 if (!ch_vsi)
8637 continue;
8638
8639 /* reconfig channel resources */
8640 ice_cfg_chnl_all_res(main_vsi, ch);
8641
8642 /* replay BW rate limit if it is non-zero */
8643 if (!ch->max_tx_rate && !ch->min_tx_rate)
8644 continue;
8645
8646 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8647 ch->min_tx_rate);
8648 if (err)
8649 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8650 err, ch->max_tx_rate, ch->min_tx_rate,
8651 ch_vsi->vsi_num);
8652 else
8653 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8654 ch->max_tx_rate, ch->min_tx_rate,
8655 ch_vsi->vsi_num);
8656 }
8657
8658 /* reconfig RSS for main VSI */
8659 if (main_vsi->ch_rss_size)
8660 ice_vsi_cfg_rss_lut_key(main_vsi);
8661
8662 return 0;
8663
8664 cleanup:
8665 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8666 return err;
8667 }
8668
8669 /**
8670 * ice_create_q_channels - Add queue channel for the given TCs
8671 * @vsi: VSI to be configured
8672 *
8673 * Configures queue channel mapping to the given TCs
8674 */
8675 static int ice_create_q_channels(struct ice_vsi *vsi)
8676 {
8677 struct ice_pf *pf = vsi->back;
8678 struct ice_channel *ch;
8679 int ret = 0, i;
8680
8681 ice_for_each_chnl_tc(i) {
8682 if (!(vsi->all_enatc & BIT(i)))
8683 continue;
8684
8685 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8686 if (!ch) {
8687 ret = -ENOMEM;
8688 goto err_free;
8689 }
8690 INIT_LIST_HEAD(&ch->list);
8691 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8692 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8693 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8694 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8695 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8696
8697 /* convert to Kbits/s */
8698 if (ch->max_tx_rate)
8699 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8700 ICE_BW_KBPS_DIVISOR);
8701 if (ch->min_tx_rate)
8702 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8703 ICE_BW_KBPS_DIVISOR);
8704
8705 ret = ice_create_q_channel(vsi, ch);
8706 if (ret) {
8707 dev_err(ice_pf_to_dev(pf),
8708 "failed creating channel TC:%d\n", i);
8709 kfree(ch);
8710 goto err_free;
8711 }
8712 list_add_tail(&ch->list, &vsi->ch_list);
8713 vsi->tc_map_vsi[i] = ch->ch_vsi;
8714 dev_dbg(ice_pf_to_dev(pf),
8715 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8716 }
8717 return 0;
8718
8719 err_free:
8720 ice_remove_q_channels(vsi, false);
8721
8722 return ret;
8723 }
8724
8725 /**
8726 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8727 * @netdev: net device to configure
8728 * @type_data: TC offload data
8729 */
8730 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8731 {
8732 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8733 struct ice_netdev_priv *np = netdev_priv(netdev);
8734 struct ice_vsi *vsi = np->vsi;
8735 struct ice_pf *pf = vsi->back;
8736 u16 mode, ena_tc_qdisc = 0;
8737 int cur_txq, cur_rxq;
8738 u8 hw = 0, num_tcf;
8739 struct device *dev;
8740 int ret, i;
8741
8742 dev = ice_pf_to_dev(pf);
8743 num_tcf = mqprio_qopt->qopt.num_tc;
8744 hw = mqprio_qopt->qopt.hw;
8745 mode = mqprio_qopt->mode;
8746 if (!hw) {
8747 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8748 vsi->ch_rss_size = 0;
8749 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8750 goto config_tcf;
8751 }
8752
8753 /* Generate queue region map for number of TCF requested */
8754 for (i = 0; i < num_tcf; i++)
8755 ena_tc_qdisc |= BIT(i);
8756
8757 switch (mode) {
8758 case TC_MQPRIO_MODE_CHANNEL:
8759
8760 if (pf->hw.port_info->is_custom_tx_enabled) {
8761 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8762 return -EBUSY;
8763 }
8764 ice_tear_down_devlink_rate_tree(pf);
8765
8766 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8767 if (ret) {
8768 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8769 ret);
8770 return ret;
8771 }
8772 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8773 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8774 /* don't assume the state of hw_tc_offload during driver load;
8775 * set the flag for TC flower filters if hw_tc_offload is
8776 * already ON
8777 */
8778 if (vsi->netdev->features & NETIF_F_HW_TC)
8779 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8780 break;
8781 default:
8782 return -EINVAL;
8783 }
8784
8785 config_tcf:
8786
8787 /* Requesting same TCF configuration as already enabled */
8788 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8789 mode != TC_MQPRIO_MODE_CHANNEL)
8790 return 0;
8791
8792 /* Pause VSI queues */
8793 ice_dis_vsi(vsi, true);
8794
8795 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8796 ice_remove_q_channels(vsi, true);
8797
8798 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8799 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8800 num_online_cpus());
8801 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8802 num_online_cpus());
8803 } else {
8804 /* logic to rebuild VSI, same as "ethtool -L" */
8805 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8806
8807 for (i = 0; i < num_tcf; i++) {
8808 if (!(ena_tc_qdisc & BIT(i)))
8809 continue;
8810
8811 offset = vsi->mqprio_qopt.qopt.offset[i];
8812 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8813 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8814 }
8815 vsi->req_txq = offset + qcount_tx;
8816 vsi->req_rxq = offset + qcount_rx;
8817
8818 /* store away the original rss_size info, so that it gets reused
8819 * by ice_vsi_rebuild during the tc-qdisc delete stage - to
8820 * determine what the rss_size for the main VSI should be
8821 */
8822 vsi->orig_rss_size = vsi->rss_size;
8823 }
8824
8825 /* save current values of Tx and Rx queues before calling VSI rebuild
8826 * for fallback option
8827 */
8828 cur_txq = vsi->num_txq;
8829 cur_rxq = vsi->num_rxq;
8830
8831 /* proceed with rebuild main VSI using correct number of queues */
8832 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8833 if (ret) {
8834 /* fallback to current number of queues */
8835 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8836 vsi->req_txq = cur_txq;
8837 vsi->req_rxq = cur_rxq;
8838 clear_bit(ICE_RESET_FAILED, pf->state);
8839 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8840 dev_err(dev, "Rebuild of main VSI failed again\n");
8841 return ret;
8842 }
8843 }
8844
8845 vsi->all_numtc = num_tcf;
8846 vsi->all_enatc = ena_tc_qdisc;
8847 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8848 if (ret) {
8849 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8850 vsi->vsi_num);
8851 goto exit;
8852 }
8853
8854 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8855 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8856 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8857
8858 /* set TC0 rate limit if specified */
8859 if (max_tx_rate || min_tx_rate) {
8860 /* convert to Kbits/s */
8861 if (max_tx_rate)
8862 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8863 if (min_tx_rate)
8864 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8865
8866 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8867 if (!ret) {
8868 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8869 max_tx_rate, min_tx_rate, vsi->vsi_num);
8870 } else {
8871 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8872 max_tx_rate, min_tx_rate, vsi->vsi_num);
8873 goto exit;
8874 }
8875 }
8876 ret = ice_create_q_channels(vsi);
8877 if (ret) {
8878 netdev_err(netdev, "failed configuring queue channels\n");
8879 goto exit;
8880 } else {
8881 netdev_dbg(netdev, "successfully configured channels\n");
8882 }
8883 }
8884
8885 if (vsi->ch_rss_size)
8886 ice_vsi_cfg_rss_lut_key(vsi);
8887
8888 exit:
8889 /* if error, reset the all_numtc and all_enatc */
8890 if (ret) {
8891 vsi->all_numtc = 0;
8892 vsi->all_enatc = 0;
8893 }
8894 /* resume VSI */
8895 ice_ena_vsi(vsi, true);
8896
8897 return ret;
8898 }
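
/* Illustrative userspace trigger for the channel (ADQ) path above, using a
 * hypothetical interface name and rates:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *		queues 4@0 4@4 hw 1 mode channel shaper bw_rlimit \
 *		max_rate 4Gbit 5Gbit
 *
 * Deleting the qdisc ("tc qdisc del dev eth0 root") re-enters this function
 * with hw == 0 and tears the channels down.
 */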
8899
8900 static LIST_HEAD(ice_block_cb_list);
8901
8902 static int
8903 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8904 void *type_data)
8905 {
8906 struct ice_netdev_priv *np = netdev_priv(netdev);
8907 struct ice_pf *pf = np->vsi->back;
8908 bool locked = false;
8909 int err;
8910
8911 switch (type) {
8912 case TC_SETUP_BLOCK:
8913 return flow_block_cb_setup_simple(type_data,
8914 &ice_block_cb_list,
8915 ice_setup_tc_block_cb,
8916 np, np, true);
8917 case TC_SETUP_QDISC_MQPRIO:
8918 if (ice_is_eswitch_mode_switchdev(pf)) {
8919 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
8920 return -EOPNOTSUPP;
8921 }
8922
8923 if (pf->adev) {
8924 mutex_lock(&pf->adev_mutex);
8925 device_lock(&pf->adev->dev);
8926 locked = true;
8927 if (pf->adev->dev.driver) {
8928 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
8929 err = -EBUSY;
8930 goto adev_unlock;
8931 }
8932 }
8933
8934 /* setup traffic classifier for receive side */
8935 mutex_lock(&pf->tc_mutex);
8936 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8937 mutex_unlock(&pf->tc_mutex);
8938
8939 adev_unlock:
8940 if (locked) {
8941 device_unlock(&pf->adev->dev);
8942 mutex_unlock(&pf->adev_mutex);
8943 }
8944 return err;
8945 default:
8946 return -EOPNOTSUPP;
8947 }
8948 return -EOPNOTSUPP;
8949 }
8950
8951 static struct ice_indr_block_priv *
8952 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8953 struct net_device *netdev)
8954 {
8955 struct ice_indr_block_priv *cb_priv;
8956
8957 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8958 if (!cb_priv->netdev)
8959 return NULL;
8960 if (cb_priv->netdev == netdev)
8961 return cb_priv;
8962 }
8963 return NULL;
8964 }
8965
8966 static int
8967 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8968 void *indr_priv)
8969 {
8970 struct ice_indr_block_priv *priv = indr_priv;
8971 struct ice_netdev_priv *np = priv->np;
8972
8973 switch (type) {
8974 case TC_SETUP_CLSFLOWER:
8975 return ice_setup_tc_cls_flower(np, priv->netdev,
8976 (struct flow_cls_offload *)
8977 type_data);
8978 default:
8979 return -EOPNOTSUPP;
8980 }
8981 }
8982
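/**
 * ice_indr_setup_tc_block - bind/unbind an indirect flow block
 * @netdev: tunnel or VLAN netdev the block is attached to
 * @sch: Qdisc the flow block belongs to
 * @np: private structure of the uplink port netdev
 * @f: flow block offload command (bind or unbind)
 * @data: opaque data passed through to the flow offload core
 * @cleanup: callback invoked when the block callback is torn down
 *
 * Only ingress blocks on supported tunnel devices, or on VLAN devices
 * stacked on this port's netdev, are accepted.
 */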
static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

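/**
 * ice_indr_setup_tc_cb - TC indirect block callback entry point
 * @netdev: netdev the indirect block was requested for
 * @sch: Qdisc the flow block belongs to
 * @cb_priv: the struct ice_netdev_priv registered with the flow core
 * @type: type of TC setup request
 * @type_data: request data, a struct flow_block_offload for TC_SETUP_BLOCK
 * @data: opaque data passed through to the flow offload core
 * @cleanup: callback invoked when a block callback is torn down
 *
 * Invoked by the flow offload core for netdevs this driver does not own,
 * such as tunnel devices; only TC_SETUP_BLOCK is handled.
 */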
static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);

	default:
		return -EOPNOTSUPP;
	}
}
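
/* The callback above is what this driver hands to the flow offload core.
 * A sketch of the register/unregister pairing, assuming np is the uplink
 * port's ice_netdev_priv (the actual calls live in the driver's
 * indirect-block register/unregister helpers):
 *
 *	err = flow_indr_dev_register(ice_indr_setup_tc_cb, np);
 *	...
 *	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
 *				 ice_rep_indr_tc_block_unbind);
 */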

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be called directly except
 * from ice_open and the reset handling routines.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success; -EBUSY if a reset is in progress, -EIO if the
 * physical link could not be brought down cleanly
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			if (link_err == -ENOMEDIUM)
				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
					    vsi->vsi_num);
			else
				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
					   vsi->vsi_num, link_err);

			ice_vsi_close(vsi);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 *
 * Returns the feature set, with checksum and GSO features cleared when the
 * packet's header layout exceeds what the Tx descriptors can express.
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is less than 64 bytes
	 * (ICE_TXD_CTX_MIN_MSS); if it is, drop GSO for this frame.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

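	/* For scale: an untagged Ethernet frame puts the network header at
	 * offset 14, and an in-band VLAN tag pushes that to 18, both even
	 * values that easily satisfy the MACLEN check above.
	 */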
	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

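/* ice_netdev_safe_mode_ops is the reduced set of callbacks used while the
 * driver runs in safe mode (for instance when the DDP package cannot be
 * loaded); advanced features such as TC offloads and VF configuration are
 * only wired up in the full ice_netdev_ops table below. Which table a
 * netdev gets is decided during netdev setup elsewhere in this file.
 */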
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};
