// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
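
/* Usage sketch (illustrative values, mirroring the parameter descriptions
 * above): "modprobe ice debug=16" requests all netif message levels, while
 * a value of the 0x8XXXXXXX form also feeds the hardware debug_mask when
 * CONFIG_DYNAMIC_DEBUG is not set.
 */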

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
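
/* Worked example with hypothetical values: for a ring with count = 512,
 * next_to_clean (head) = 500 and next_to_use (tail) = 10, the tail has
 * wrapped around the end of the ring, so the number of descriptors still
 * pending is 10 + 512 - 500 = 22.
 */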

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

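	/* Serialize against any other context reconfiguring this VSI:
	 * sleep 1-2 ms per attempt until this context owns ICE_CFG_BUSY.
	 */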
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error in the 'if' condition below; instead continue
	 * processing the rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents that from happening and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	synchronize_irq(pf->oicr_irq.virq);

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}

	if (vsi->netdev)
		netif_device_detach(vsi->netdev);
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be reported with a debug
 * message, and the driver should continue on with the rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail on failure since other bookkeeping still needs to happen
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

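	/* A set bit in the event mask suppresses delivery of the
	 * corresponding link event, so invert the set of events we
	 * want to receive.
	 */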
	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. The actual wait is done by a call to ice_aq_wait_for_event().
 *
 * The calls are separated to allow the caller to register for the event
 * before sending the command, which mitigates a race between registering and
 * the FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}
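
/* Minimal usage sketch for the prep/wait pair above (descriptor setup and
 * error handling elided; opcode, desc and the HZ timeout are placeholders):
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);
 */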

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
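 *
 * Returns: non-zero when the work budget (ICE_DFLT_IRQ_WORK) is exhausted
 * with events still pending, zero otherwise.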
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			data.num_msg_proc = i;
			data.num_pending_arq = pending;
			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

			ice_vc_process_vf_msg(pf, &event, &data);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart the service task and schedule work
 * @pf: board private structure
 *
 * This function is needed for the suspend and resume paths (e.g. the WoL
 * scenario).
 */
ice_service_task_restart(struct ice_pf * pf)1706 static void ice_service_task_restart(struct ice_pf *pf)
1707 {
1708 clear_bit(ICE_SERVICE_DIS, pf->state);
1709 ice_service_task_schedule(pf);
1710 }
1711
1712 /**
1713 * ice_service_timer - timer callback to schedule service task
1714 * @t: pointer to timer_list
1715 */
ice_service_timer(struct timer_list * t)1716 static void ice_service_timer(struct timer_list *t)
1717 {
1718 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1719
1720 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1721 ice_service_task_schedule(pf);
1722 }

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}
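
	/* Illustrative note (not driver code): each GL_MDET_* decode above is
	 * a plain mask-and-shift; assuming the _M macros are contiguous
	 * masks, the same extraction could be written with FIELD_GET() from
	 * <linux/bitfield.h>:
	 *
	 *	u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
	 */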

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 vf->vf_id);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
			}
		}
	}
	mutex_unlock(&pf->vfs.table_lock);

	ice_print_vfs_mdd_events(pf);
}

/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * the link changes, a link event will be triggered because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = ice_pf_to_dev(vsi->back);

	pi = vsi->port_info;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	/* Use the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	kfree(cfg);
out:
	kfree(pcaps);
	return retcode;
}
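
/* Illustrative sketch (not verbatim driver code): the link-down-on-close
 * behavior is built from this helper; a close path guarded by the private
 * flag might force the PHY down like so:
 *
 *	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
 *		err = ice_force_phys_link_state(vsi, false);
 */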

/**
 * ice_init_nvm_phy_type - Initialize the NVM PHY type
 * @pi: port info structure
 *
 * Initialize nvm_phy_type_[low|high] for link lenient mode support
 */
static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_pf *pf = pi->hw->back;
	int err;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
				  pcaps, NULL);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto out;
	}

	pf->nvm_phy_type_hi = pcaps->phy_type_high;
	pf->nvm_phy_type_lo = pcaps->phy_type_low;

out:
	kfree(pcaps);
	return err;
}

/**
 * ice_init_link_dflt_override - Initialize link default override
 * @pi: port info structure
 *
 * Initialize link default override and PHY total port shutdown during probe
 */
static void ice_init_link_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;
	if (ice_get_link_default_override(ldo, pi))
		return;

	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
		return;

	/* Enable Total Port Shutdown (override/replace link-down-on-close
	 * ethtool private flag) for ports with Port Disable bit set.
	 */
	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
}

/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY is
 * configured.
 *
 * This function should be called only if the FW doesn't support default
 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

	/* If link default override is enabled, use it to mask NVM PHY
	 * capabilities for speed and FEC default configuration.
	 */
	cfg = &phy->curr_user_phy_cfg;

	if (ldo->phy_type_low || ldo->phy_type_high) {
		cfg->phy_type_low = pf->nvm_phy_type_lo &
				    cpu_to_le64(ldo->phy_type_low);
		cfg->phy_type_high = pf->nvm_phy_type_hi &
				     cpu_to_le64(ldo->phy_type_high);
	}
	cfg->link_fec_opt = ldo->fec_options;
	phy->curr_user_fec_req = ICE_FEC_AUTO;

	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}

/**
 * ice_init_phy_user_cfg - Initialize the PHY user configuration
 * @pi: port info structure
 *
 * Initialize the current user PHY configuration, speed, FEC, and FC requested
 * mode to default. The PHY defaults are from get PHY capabilities topology
 * with media so call when media is first available. An error is returned if
 * called when media is not available. The PHY initialization completed state is
 * set here.
 *
 * These configurations are used when setting PHY
 * configuration. The user PHY configuration is updated on set PHY
 * configuration. Returns 0 on success, negative on failure
 */
static int ice_init_phy_user_cfg(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;
	int err;

	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EIO;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto err_out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);

	/* check if lenient mode is supported and enabled */
	if (ice_fw_supports_link_override(pi->hw) &&
	    !(pcaps->module_compliance_enforcement &
	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

		/* if the FW supports default PHY configuration mode, then the driver
		 * does not have to apply link override settings. If not,
		 * initialize user PHY configuration with link override values
		 */
		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
			ice_init_phy_cfg_dflt_override(pi);
			goto out;
		}
	}

	/* if link default override is not enabled, set user flow control and
	 * FEC settings based on what get_phy_caps returned
	 */
	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
						      pcaps->link_fec_options);
	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

out:
	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
	kfree(pcaps);
	return err;
}

/**
 * ice_configure_phy - configure PHY
 * @vsi: VSI of PHY
 *
 * Set the PHY configuration. If the current PHY configuration is the same as
 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise,
 * configure it based on the PHY capabilities reported for the topology with
 * media.
 */
static int ice_configure_phy(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = vsi->back;
	int err;

	/* Ensure we have media as we cannot configure a medialess port */
	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -ENOMEDIUM;

	ice_print_topo_conflict(vsi);

	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
		return -EPERM;

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
		return ice_force_phys_link_state(vsi, true);

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get current PHY config */
	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				  NULL);
	if (err) {
		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
			vsi->vsi_num, err);
		goto done;
	}

	/* If PHY enable link is configured and configuration has not changed,
	 * there's nothing to do
	 */
	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
		goto done;

	/* Use PHY topology as baseline for configuration */
	memset(pcaps, 0, sizeof(*pcaps));
	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
			vsi->vsi_num, err);
		goto done;
	}

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		err = -ENOMEM;
		goto done;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);

	/* Speed - If default override pending, use curr_user_phy_cfg set in
	 * ice_init_phy_user_cfg_ldo.
	 */
	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
			       vsi->back->state)) {
		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
	} else {
		u64 phy_low = 0, phy_high = 0;

		ice_update_phy_type(&phy_low, &phy_high,
				    pi->phy.curr_user_speed_req);
		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
		cfg->phy_type_high = pcaps->phy_type_high &
				     cpu_to_le64(phy_high);
	}

	/* Can't provide what was requested; use PHY capabilities */
	if (!cfg->phy_type_low && !cfg->phy_type_high) {
		cfg->phy_type_low = pcaps->phy_type_low;
		cfg->phy_type_high = pcaps->phy_type_high;
	}

	/* FEC */
	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);

	/* Can't provide what was requested; use PHY capabilities */
	if (cfg->link_fec_opt !=
	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
		cfg->link_fec_opt = pcaps->link_fec_options;
	}

	/* Flow Control - always supported; no need to check against
	 * capabilities
	 */
	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);

	/* Enable link and link update */
	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;

	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
	if (err)
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, err);

	kfree(cfg);
done:
	kfree(pcaps);
	return err;
}

/**
 * ice_check_media_subtask - Check for media
 * @pf: pointer to PF struct
 *
 * If media is available, then initialize the PHY user configuration if it has
 * not been initialized yet, and configure the PHY if the interface is up.
 */
static void ice_check_media_subtask(struct ice_pf *pf)
{
	struct ice_port_info *pi;
	struct ice_vsi *vsi;
	int err;

	/* No need to check for media if it's already present */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
		return;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Refresh link info and check if media is present */
	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err)
		return;

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
			ice_init_phy_user_cfg(pi);

		/* PHY settings are reset on media insertion, reconfigure
		 * PHY to preserve settings.
		 */
		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
			return;

		err = ice_configure_phy(vsi);
		if (!err)
			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		/* A Link Status Event will be generated; the event handler
		 * will complete bringing the interface up
		 */
	}
}

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(ICE_SUSPENDED, pf->state) ||
	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
		struct iidc_event *event;

		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (event) {
			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
			/* report the entire OICR value to AUX driver */
			swap(event->reg, pf->oicr_err_reg);
			ice_send_event_to_aux(pf, event);
			kfree(event);
		}
	}

	/* unplug aux dev per request, if an unplug request came in
	 * while processing a plug request, this will handle it
	 */
	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
		ice_unplug_aux_dev(pf);

	/* Plug aux device per request */
	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
		ice_plug_aux_dev(pf);

	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
		struct iidc_event *event;

		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (event) {
			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
			ice_send_event_to_aux(pf, event);
			kfree(event);
		}
	}

	ice_clean_adminq_subtask(pf);
	ice_check_media_subtask(pf);
	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_watchdog_subtask(pf);

	if (ice_is_safe_mode(pf)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_process_vflr_event(pf);
	ice_clean_mailboxq_subtask(pf);
	ice_clean_sbq_subtask(pf);
	ice_sync_arfs_fltrs(pf);
	ice_flush_fdir_ctx(pf);

	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}

/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the HW instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}

/**
 * ice_schedule_reset - schedule a reset
 * @pf: board private structure
 * @reset: reset being requested
 */
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
{
	struct device *dev = ice_pf_to_dev(pf);

	/* bail out if earlier reset has failed */
	if (test_bit(ICE_RESET_FAILED, pf->state)) {
		dev_dbg(dev, "earlier reset has failed\n");
		return -EIO;
	}
	/* bail if reset/recovery already in progress */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_dbg(dev, "Reset already in progress\n");
		return -EBUSY;
	}

	switch (reset) {
	case ICE_RESET_PFR:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case ICE_RESET_CORER:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case ICE_RESET_GLOBR:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		return -EINVAL;
	}

	ice_service_task_schedule(pf);
	return 0;
}
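
/* Illustrative sketch (not verbatim driver code): a caller that hits a fatal
 * condition requests a reset and lets the service task drive the rebuild:
 *
 *	err = ice_schedule_reset(pf, ICE_RESET_PFR);
 *	if (err)
 *		dev_dbg(ice_pf_to_dev(pf), "reset not scheduled: %d\n", err);
 */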

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void
ice_irq_affinity_notify(struct irq_affinity_notify *notify,
			const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;
	int i;

	ice_for_each_q_vector(vsi, i)
		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);

	ice_flush(hw);
	return 0;
}

/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	dev = ice_pf_to_dev(pf);
	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = q_vector->irq.virq;

		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.rx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.tx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
					       IRQF_SHARED, q_vector->name,
					       q_vector);
		else
			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
					       0, q_vector->name, q_vector);
		if (err) {
			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
				   err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
			struct irq_affinity_notify *affinity_notify;

			affinity_notify = &q_vector->affinity_notify;
			affinity_notify->notify = ice_irq_affinity_notify;
			affinity_notify->release = ice_irq_affinity_release;
			irq_set_affinity_notifier(irq_num, affinity_notify);
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	err = ice_set_cpu_rx_rmap(vsi);
	if (err) {
		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
			   vsi->vsi_num, ERR_PTR(err));
		goto free_q_irqs;
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector--) {
		irq_num = vsi->q_vectors[vector]->irq.virq;
		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
			irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		/* free with the same dev_id that was passed to request_irq */
		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
 * @vsi: VSI to setup Tx rings used by XDP
 *
 * Return 0 on success and negative value on error
 */
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_tx_desc *tx_desc;
	int i, j;

	ice_for_each_xdp_txq(vsi, i) {
		u16 xdp_q_idx = vsi->alloc_txq + i;
		struct ice_ring_stats *ring_stats;
		struct ice_tx_ring *xdp_ring;

		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
		if (!xdp_ring)
			goto free_xdp_rings;

		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
		if (!ring_stats) {
			ice_free_tx_ring(xdp_ring);
			goto free_xdp_rings;
		}

		xdp_ring->ring_stats = ring_stats;
		xdp_ring->q_index = xdp_q_idx;
		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
		xdp_ring->vsi = vsi;
		xdp_ring->netdev = NULL;
		xdp_ring->dev = dev;
		xdp_ring->count = vsi->num_tx_desc;
		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
		if (ice_setup_tx_ring(xdp_ring))
			goto free_xdp_rings;
		ice_set_ring_xdp(xdp_ring);
		spin_lock_init(&xdp_ring->tx_lock);
		for (j = 0; j < xdp_ring->count; j++) {
			tx_desc = ICE_TX_DESC(xdp_ring, j);
			tx_desc->cmd_type_offset_bsz = 0;
		}
	}

	return 0;

free_xdp_rings:
	for (; i >= 0; i--) {
		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
			vsi->xdp_rings[i]->ring_stats = NULL;
			ice_free_tx_ring(vsi->xdp_rings[i]);
		}
	}
	return -ENOMEM;
}

/**
 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
 * @vsi: VSI to set the bpf prog on
 * @prog: the bpf prog pointer
 */
static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;
	int i;

	old_prog = xchg(&vsi->xdp_prog, prog);
	ice_for_each_rxq(vsi, i)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

	if (old_prog)
		bpf_prog_put(old_prog);
}
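
/* Illustrative note (not driver code): xchg() atomically publishes the new
 * program while handing back the old one, so readers of vsi->xdp_prog see
 * either the old or the new pointer, never a torn update. The pattern is:
 *
 *	struct bpf_prog *old = xchg(&vsi->xdp_prog, new_prog);
 *
 *	if (old)
 *		bpf_prog_put(old);	// release the reference on the old prog
 */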

/**
 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
 * @vsi: VSI to bring up Tx rings used by XDP
 * @prog: bpf program that will be assigned to VSI
 * @cfg_type: create from scratch or restore the existing configuration
 *
 * Return 0 on success and negative value on error
 */
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	int xdp_rings_rem = vsi->num_xdp_txq;
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg xdp_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->num_xdp_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = vsi->alloc_txq,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct device *dev;
	int i, v_idx;
	int status;

	dev = ice_pf_to_dev(pf);
	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
	if (!vsi->xdp_rings)
		return -ENOMEM;

	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
	if (__ice_vsi_get_qs(&xdp_qs_cfg))
		goto err_map_xdp;

	if (static_key_enabled(&ice_xdp_locking_key))
		netdev_warn(vsi->netdev,
			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");

	if (ice_xdp_alloc_setup_rings(vsi))
		goto clear_xdp_rings;

	/* follow the logic from ice_vsi_map_rings_to_vectors */
	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		int xdp_rings_per_v, q_id, q_base;

		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
					       vsi->num_q_vectors - v_idx);
		q_base = vsi->num_xdp_txq - xdp_rings_rem;

		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];

			xdp_ring->q_vector = q_vector;
			xdp_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = xdp_ring;
		}
		xdp_rings_rem -= xdp_rings_per_v;
	}
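
	/* Worked example (illustrative): with num_xdp_txq = 8 and
	 * num_q_vectors = 3, the DIV_ROUND_UP() distribution above assigns
	 * ceil(8/3) = 3, then ceil(5/2) = 3, then ceil(2/1) = 2 XDP rings to
	 * the three vectors respectively.
	 */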

	ice_for_each_rxq(vsi, i) {
		if (static_key_enabled(&ice_xdp_locking_key)) {
			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
		} else {
			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
			struct ice_tx_ring *ring;

			ice_for_each_tx_ring(ring, q_vector->tx) {
				if (ice_ring_is_xdp(ring)) {
					vsi->rx_rings[i]->xdp_ring = ring;
					break;
				}
			}
		}
		ice_tx_xsk_pool(vsi, i);
	}

	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
	 * ice_cfg_vsi_lan is being called
	 */
	if (cfg_type == ICE_XDP_CFG_PART)
		return 0;

	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
			status);
		goto clear_xdp_rings;
	}

	/* assign the prog only when it's not already present on VSI;
	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
	 * VSI rebuild that happens under ethtool -L can expose us to
	 * the bpf_prog refcount issues as we would be swapping same
	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
	 * this is not harmful as dev_xdp_install bumps the refcount
	 * before calling the op exposed by the driver;
	 */
	if (!ice_is_xdp_ena_vsi(vsi))
		ice_vsi_assign_bpf_prog(vsi, prog);

	return 0;
clear_xdp_rings:
	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

err_map_xdp:
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	devm_kfree(dev, vsi->xdp_rings);
	return -ENOMEM;
}

/**
 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
 * @vsi: VSI to remove XDP rings
 * @cfg_type: disable XDP permanently or allow it to be restored later
 *
 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
 * resources
 */
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	int i, v_idx;

	/* q_vectors are freed in reset path so there's no point in detaching
	 * rings
	 */
	if (cfg_type == ICE_XDP_CFG_PART)
		goto free_qmap;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		struct ice_tx_ring *ring;

		ice_for_each_tx_ring(ring, q_vector->tx)
			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
				break;

		/* restore the value of last node prior to XDP setup */
		q_vector->tx.tx_ring = ring;
	}

free_qmap:
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			if (vsi->xdp_rings[i]->desc) {
				synchronize_rcu();
				ice_free_tx_ring(vsi->xdp_rings[i]);
			}
			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
			vsi->xdp_rings[i]->ring_stats = NULL;
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
	vsi->xdp_rings = NULL;

	if (static_key_enabled(&ice_xdp_locking_key))
		static_branch_dec(&ice_xdp_locking_key);

	if (cfg_type == ICE_XDP_CFG_PART)
		return 0;

	ice_vsi_assign_bpf_prog(vsi, NULL);

	/* notify Tx scheduler that we destroyed XDP queues and bring
	 * back the old number of child nodes
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq;

	/* change number of XDP Tx queues to 0 */
	vsi->num_xdp_txq = 0;

	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			       max_txqs);
}

/**
 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
 * @vsi: VSI to schedule napi on
 */
static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];

		if (rx_ring->xsk_pool)
			napi_schedule(&rx_ring->q_vector->napi);
	}
}

/**
 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
 * @vsi: VSI to determine the count of XDP Tx qs
 *
 * Returns 0 if the number of available Tx queues is at least half the number
 * of possible CPUs, -ENOMEM otherwise.
 */
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
{
	u16 avail = ice_get_avail_txq_count(vsi->back);
	u16 cpus = num_possible_cpus();

	if (avail < cpus / 2)
		return -ENOMEM;

	vsi->num_xdp_txq = min_t(u16, avail, cpus);

	if (vsi->num_xdp_txq < cpus)
		static_branch_inc(&ice_xdp_locking_key);

	return 0;
}
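
/* Worked example (illustrative): on a 16-CPU system with 12 available Tx
 * queues, 12 >= 16 / 2 so the check passes and num_xdp_txq = min(12, 16) =
 * 12; since 12 < 16, the ice_xdp_locking_key static branch is enabled and
 * XDP Tx rings are shared between CPUs under xdp_ring->tx_lock.
 */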

/**
 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: Pointer to VSI structure
 */
static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
{
	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		return ICE_RXBUF_1664;
	else
		return ICE_RXBUF_3072;
}

/**
 * ice_xdp_setup_prog - Add or remove XDP eBPF program
 * @vsi: VSI to setup XDP for
 * @prog: XDP program
 * @extack: netlink extended ack
 */
static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack)
{
	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
	int ret = 0, xdp_ring_err = 0;
	bool if_running;

	if (prog && !prog->aux->xdp_has_frags) {
		if (frame_size > ice_max_xdp_frame_size(vsi)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "MTU is too large for linear frames and XDP prog does not support frags");
			return -EOPNOTSUPP;
		}
	}

	/* hot swap progs and avoid toggling link */
	if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
	    test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
		ice_vsi_assign_bpf_prog(vsi, prog);
		return 0;
	}

	if_running = netif_running(vsi->netdev) &&
		     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);

	/* need to stop netdev while setting up the program for Rx rings */
	if (if_running) {
		ret = ice_down(vsi);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
			return ret;
		}
	}

	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
		if (xdp_ring_err) {
			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
		} else {
			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
							     ICE_XDP_CFG_FULL);
			if (xdp_ring_err)
				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
		}
		xdp_features_set_redirect_target(vsi->netdev, true);
		/* reallocate Rx queues that are used for zero-copy */
		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
		xdp_features_clear_redirect_target(vsi->netdev);
		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
		/* reallocate Rx queues that were used for zero-copy */
		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
	}

	if (if_running)
		ret = ice_up(vsi);

	if (!ret && prog)
		ice_vsi_rx_napi_schedule(vsi);

	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}

/**
 * ice_xdp_safe_mode - XDP handler for safe mode
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
			     struct netdev_bpf *xdp)
{
	NL_SET_ERR_MSG_MOD(xdp->extack,
			   "Please provide working DDP firmware package in order to use XDP\n"
			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
	return -EOPNOTSUPP;
}

/**
 * ice_xdp - implements XDP handler
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	int ret;

	if (vsi->type != ICE_VSI_PF) {
		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
		return -EINVAL;
	}

	mutex_lock(&vsi->xdp_state_lock);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
		break;
	case XDP_SETUP_XSK_POOL:
		ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&vsi->xdp_state_lock);
	return ret;
}
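
/* Illustrative usage (assumed userspace commands, not part of the driver):
 * the handler above is reached through the ndo_bpf op when a program is
 * attached or detached, e.g.:
 *
 *	# ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
 *	# ip link set dev eth0 xdpdrv off
 *
 * XDP_SETUP_XSK_POOL is reached via AF_XDP socket setup instead.
 */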

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* Disable anti-spoof detection interrupt to prevent spurious event
	 * interrupts during a function reset. Anti-spoof functionality is
	 * still supported.
	 */
	val = rd32(hw, GL_MDCK_TX_TDPU);
	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
	wr32(hw, GL_MDCK_TX_TDPU, val);

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_PUSH_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	u32 oicr, ena_mask;

	dev = ice_pf_to_dev(pf);
	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		/* disable any further VFLR event notifications */
		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
			u32 reg = rd32(hw, PFINT_OICR_ENA);

			reg &= ~PFINT_OICR_VFLR_M;
			wr32(hw, PFINT_OICR_ENA, reg);
		} else {
			ena_mask &= ~PFINT_OICR_VFLR_M;
			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
		}
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(dev, "Invalid reset type %d\n", reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 */
		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(ICE_GLOBR_RECV, pf->state);
			else
				set_bit(ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_TSYN_TX_M) {
		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
		if (!hw->reset_ongoing)
			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
	}

	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));

		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;

		if (hw->func_caps.ts_func_info.src_tmr_owned) {
			/* Save EVENTs from GLTSYN register */
			pf->ptp.ext_ts_irq |= gltsyn_stat &
					      (GLTSYN_STAT_EVENT0_M |
					       GLTSYN_STAT_EVENT1_M |
					       GLTSYN_STAT_EVENT2_M);

			set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
		}
	}

#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
	if (oicr & ICE_AUX_CRIT_ERR) {
		pf->oicr_err_reg |= oicr;
		set_bit(ICE_AUX_ERR_PENDING, pf->state);
		ena_mask &= ~ICE_AUX_CRIT_ERR;
	}

	/* Report any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(ICE_PFR_REQ, pf->state);
		}
	}

	return IRQ_WAKE_THREAD;
}

/**
 * ice_misc_intr_thread_fn - misc interrupt thread function
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
{
	struct ice_pf *pf = data;
	struct ice_hw *hw;

	hw = &pf->hw;

	if (ice_is_reset_in_progress(pf->state))
		return IRQ_HANDLED;

	ice_service_task_schedule(pf);

	if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
		ice_ptp_extts_event(pf);

	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
		/* Process outstanding Tx timestamps. If there is more work,
		 * re-arm the interrupt to trigger again.
		 */
		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
			ice_flush(hw);
		}
	}

	ice_irq_dynamic_ena(hw, NULL, NULL);

	return IRQ_HANDLED;
}

/**
 * ice_dis_ctrlq_interrupts - disable control queue interrupts
 * @hw: pointer to HW structure
 */
static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
{
	/* disable Admin queue Interrupt causes */
	wr32(hw, PFINT_FW_CTL,
	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);

	/* disable Mailbox queue Interrupt causes */
	wr32(hw, PFINT_MBX_CTL,
	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);

	wr32(hw, PFINT_SB_CTL,
	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);

	/* disable Control queue Interrupt causes */
	wr32(hw, PFINT_OICR_CTL,
	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);

	ice_flush(hw);
}

/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	int misc_irq_num = pf->oicr_irq.virq;
	struct ice_hw *hw = &pf->hw;

	ice_dis_ctrlq_interrupts(hw);

	/* disable OICR interrupt */
	wr32(hw, PFINT_OICR_ENA, 0);
	ice_flush(hw);

	synchronize_irq(misc_irq_num);
	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);

	ice_free_irq(pf, pf->oicr_irq);
}

/**
 * ice_ena_ctrlq_interrupts - enable control queue interrupts
 * @hw: pointer to HW structure
 * @reg_idx: HW vector index to associate the control queue interrupts with
 */
static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
{
	u32 val;

	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
	       PFINT_OICR_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_OICR_CTL, val);

	/* enable Admin queue Interrupt causes */
	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
	       PFINT_FW_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_FW_CTL, val);

	/* enable Mailbox queue Interrupt causes */
	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
	       PFINT_MBX_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_MBX_CTL, val);

	/* This enables Sideband queue Interrupt causes */
	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
	       PFINT_SB_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_SB_CTL, val);

	ice_flush(hw);
}

/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 */
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct msi_map oicr_irq;
	int err = 0;

	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
			 dev_driver_string(dev), dev_name(dev));

	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
	if (ice_is_reset_in_progress(pf->state))
		goto skip_req_irq;

	/* reserve one vector in irq_tracker for misc interrupts */
	oicr_irq = ice_alloc_irq(pf, false);
	if (oicr_irq.index < 0)
		return oicr_irq.index;

	pf->oicr_irq = oicr_irq;
	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
					ice_misc_intr_thread_fn, 0,
					pf->int_name, pf);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
			pf->int_name, err);
		ice_free_irq(pf, pf->oicr_irq);
		return err;
	}

skip_req_irq:
	ice_ena_misc_vector(pf);

	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);

	ice_flush(hw);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return 0;
}

/**
 * ice_napi_add - register NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be registered
 *
 * This function is only called in the driver's load path. Registering the NAPI
 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
 * reset/rebuild, etc.)
 */
static void ice_napi_add(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll);
}

/**
 * ice_set_ops - set netdev and ethtool ops for the given netdev
 * @vsi: the VSI associated with the new netdev
 */
static void ice_set_ops(struct ice_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_is_safe_mode(pf)) {
		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
		ice_set_ethtool_safe_mode_ops(netdev);
		return;
	}

	netdev->netdev_ops = &ice_netdev_ops;
	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
	ice_set_ethtool_ops(netdev);

	if (vsi->type != ICE_VSI_PF)
		return;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
			       NETDEV_XDP_ACT_RX_SG;
	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
}
3429
3430 /**
3431 * ice_set_netdev_features - set features for the given netdev
3432 * @netdev: netdev instance
3433 */
ice_set_netdev_features(struct net_device * netdev)3434 static void ice_set_netdev_features(struct net_device *netdev)
3435 {
3436 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3437 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3438 netdev_features_t csumo_features;
3439 netdev_features_t vlano_features;
3440 netdev_features_t dflt_features;
3441 netdev_features_t tso_features;
3442
3443 if (ice_is_safe_mode(pf)) {
3444 /* safe mode */
3445 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3446 netdev->hw_features = netdev->features;
3447 return;
3448 }
3449
3450 dflt_features = NETIF_F_SG |
3451 NETIF_F_HIGHDMA |
3452 NETIF_F_NTUPLE |
3453 NETIF_F_RXHASH;
3454
3455 csumo_features = NETIF_F_RXCSUM |
3456 NETIF_F_IP_CSUM |
3457 NETIF_F_SCTP_CRC |
3458 NETIF_F_IPV6_CSUM;
3459
3460 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3461 NETIF_F_HW_VLAN_CTAG_TX |
3462 NETIF_F_HW_VLAN_CTAG_RX;
3463
3464 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3465 if (is_dvm_ena)
3466 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3467
3468 tso_features = NETIF_F_TSO |
3469 NETIF_F_TSO_ECN |
3470 NETIF_F_TSO6 |
3471 NETIF_F_GSO_GRE |
3472 NETIF_F_GSO_UDP_TUNNEL |
3473 NETIF_F_GSO_GRE_CSUM |
3474 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3475 NETIF_F_GSO_PARTIAL |
3476 NETIF_F_GSO_IPXIP4 |
3477 NETIF_F_GSO_IPXIP6 |
3478 NETIF_F_GSO_UDP_L4;
3479
3480 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3481 NETIF_F_GSO_GRE_CSUM;
3482 /* set features that user can change */
3483 netdev->hw_features = dflt_features | csumo_features |
3484 vlano_features | tso_features;
3485
3486 /* add support for HW_CSUM on packets with MPLS header */
3487 netdev->mpls_features = NETIF_F_HW_CSUM |
3488 NETIF_F_TSO |
3489 NETIF_F_TSO6;
3490
3491 /* enable features */
3492 netdev->features |= netdev->hw_features;
3493
3494 netdev->hw_features |= NETIF_F_HW_TC;
3495 netdev->hw_features |= NETIF_F_LOOPBACK;
3496
3497 /* encap and VLAN devices inherit default, csumo and tso features */
3498 netdev->hw_enc_features |= dflt_features | csumo_features |
3499 tso_features;
3500 netdev->vlan_features |= dflt_features | csumo_features |
3501 tso_features;
3502
3503 /* advertise support but don't enable by default since only one type of
3504 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3505 * type turns on the other has to be turned off. This is enforced by the
3506 * ice_fix_features() ndo callback.
3507 */
3508 if (is_dvm_ena)
3509 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3510 NETIF_F_HW_VLAN_STAG_TX;
3511
3512 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3513 * be changed at runtime
3514 */
3515 netdev->hw_features |= NETIF_F_RXFCS;
3516
3517 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3518 }
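/* The CTAG/STAG exclusivity noted in ice_set_netdev_features() above is
 * enforced in the driver's ndo_fix_features hook. The following is a minimal
 * sketch of that rule, not the driver's actual ice_fix_features()
 * implementation: whichever VLAN offload type the user newly requests wins,
 * and the other type is dropped from the requested feature set.
 */
#if 0	/* illustrative only */
static netdev_features_t
example_fix_vlan_features(netdev_features_t req, netdev_features_t cur)
{
	netdev_features_t ctag = NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_TX;
	netdev_features_t stag = NETIF_F_HW_VLAN_STAG_RX |
				 NETIF_F_HW_VLAN_STAG_TX;

	if ((req & stag) && !(cur & stag))
		req &= ~ctag;	/* STAG newly requested: drop CTAG */
	else if ((req & ctag) && !(cur & ctag))
		req &= ~stag;	/* CTAG newly requested: drop STAG */

	return req;
}
#endif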
3519
3520 /**
3521 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3522 * @lut: Lookup table
3523 * @rss_table_size: Lookup table size
3524 * @rss_size: Number of queues over which to spread the hash
3525 */
3526 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3527 {
3528 u16 i;
3529
3530 for (i = 0; i < rss_table_size; i++)
3531 lut[i] = i % rss_size;
3532 }
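/* A worked example of the fill above, with made-up sizes: for an
 * rss_table_size of 8 and an rss_size of 3, the modulo walk produces
 * lut[] = { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading the hash buckets
 * round-robin across the three queues.
 */
#if 0	/* illustrative only */
	u8 lut[8];

	ice_fill_rss_lut(lut, 8, 3);
	/* lut now holds: 0 1 2 0 1 2 0 1 */
#endif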
3533
3534 /**
3535 * ice_pf_vsi_setup - Set up a PF VSI
3536 * @pf: board private structure
3537 * @pi: pointer to the port_info instance
3538 *
3539 * Returns pointer to the successfully allocated VSI software struct
3540 * on success, otherwise returns NULL on failure.
3541 */
3542 static struct ice_vsi *
3543 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3544 {
3545 struct ice_vsi_cfg_params params = {};
3546
3547 params.type = ICE_VSI_PF;
3548 params.pi = pi;
3549 params.flags = ICE_VSI_FLAG_INIT;
3550
3551 return ice_vsi_setup(pf, &params);
3552 }
3553
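/**
 * ice_chnl_vsi_setup - Set up a channel VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @ch: pointer to the channel structure
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */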
3554 static struct ice_vsi *
3555 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3556 struct ice_channel *ch)
3557 {
3558 struct ice_vsi_cfg_params params = {};
3559
3560 params.type = ICE_VSI_CHNL;
3561 params.pi = pi;
3562 params.ch = ch;
3563 params.flags = ICE_VSI_FLAG_INIT;
3564
3565 return ice_vsi_setup(pf, &params);
3566 }
3567
3568 /**
3569 * ice_ctrl_vsi_setup - Set up a control VSI
3570 * @pf: board private structure
3571 * @pi: pointer to the port_info instance
3572 *
3573 * Returns pointer to the successfully allocated VSI software struct
3574 * on success, otherwise returns NULL on failure.
3575 */
3576 static struct ice_vsi *
3577 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3578 {
3579 struct ice_vsi_cfg_params params = {};
3580
3581 params.type = ICE_VSI_CTRL;
3582 params.pi = pi;
3583 params.flags = ICE_VSI_FLAG_INIT;
3584
3585 return ice_vsi_setup(pf, &params);
3586 }
3587
3588 /**
3589 * ice_lb_vsi_setup - Set up a loopback VSI
3590 * @pf: board private structure
3591 * @pi: pointer to the port_info instance
3592 *
3593 * Returns pointer to the successfully allocated VSI software struct
3594 * on success, otherwise returns NULL on failure.
3595 */
3596 struct ice_vsi *
3597 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3598 {
3599 struct ice_vsi_cfg_params params = {};
3600
3601 params.type = ICE_VSI_LB;
3602 params.pi = pi;
3603 params.flags = ICE_VSI_FLAG_INIT;
3604
3605 return ice_vsi_setup(pf, &params);
3606 }
3607
3608 /**
3609 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3610 * @netdev: network interface to be adjusted
3611 * @proto: VLAN TPID
3612 * @vid: VLAN ID to be added
3613 *
3614 * net_device_ops implementation for adding VLAN IDs
3615 */
3616 static int
3617 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3618 {
3619 struct ice_netdev_priv *np = netdev_priv(netdev);
3620 struct ice_vsi_vlan_ops *vlan_ops;
3621 struct ice_vsi *vsi = np->vsi;
3622 struct ice_vlan vlan;
3623 int ret;
3624
3625 /* VLAN 0 is added by default during load/reset */
3626 if (!vid)
3627 return 0;
3628
3629 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3630 usleep_range(1000, 2000);
3631
3632 /* Add multicast promisc rule for the VLAN ID to be added if
3633 * all-multicast is currently enabled.
3634 */
3635 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3636 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3637 ICE_MCAST_VLAN_PROMISC_BITS,
3638 vid);
3639 if (ret)
3640 goto finish;
3641 }
3642
3643 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3644
3645 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3646 * packets aren't pruned by the device's internal switch on Rx
3647 */
3648 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3649 ret = vlan_ops->add_vlan(vsi, &vlan);
3650 if (ret)
3651 goto finish;
3652
3653 /* If all-multicast is currently enabled and this VLAN ID is the only one
3654 * besides VLAN 0, we have to update the look-up type of the multicast
3655 * promisc rule for VLAN 0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3656 */
3657 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3658 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3659 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3660 ICE_MCAST_PROMISC_BITS, 0);
3661 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3662 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3663 }
3664
3665 finish:
3666 clear_bit(ICE_CFG_BUSY, vsi->state);
3667
3668 return ret;
3669 }
3670
3671 /**
3672 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3673 * @netdev: network interface to be adjusted
3674 * @proto: VLAN TPID
3675 * @vid: VLAN ID to be removed
3676 *
3677 * net_device_ops implementation for removing VLAN IDs
3678 */
3679 static int
3680 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3681 {
3682 struct ice_netdev_priv *np = netdev_priv(netdev);
3683 struct ice_vsi_vlan_ops *vlan_ops;
3684 struct ice_vsi *vsi = np->vsi;
3685 struct ice_vlan vlan;
3686 int ret;
3687
3688 /* don't allow removal of VLAN 0 */
3689 if (!vid)
3690 return 0;
3691
3692 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3693 usleep_range(1000, 2000);
3694
3695 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3696 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3697 if (ret) {
3698 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3699 vsi->vsi_num);
3700 vsi->current_netdev_flags |= IFF_ALLMULTI;
3701 }
3702
3703 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3704
3705 /* Make sure VLAN delete is successful before updating VLAN
3706 * information
3707 */
3708 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3709 ret = vlan_ops->del_vlan(vsi, &vlan);
3710 if (ret)
3711 goto finish;
3712
3713 /* Remove multicast promisc rule for the removed VLAN ID if
3714 * all-multicast is enabled.
3715 */
3716 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3717 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3718 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3719
3720 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3721 /* Update look-up type of multicast promisc rule for VLAN 0
3722 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3723 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3724 */
3725 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3726 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3727 ICE_MCAST_VLAN_PROMISC_BITS,
3728 0);
3729 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3730 ICE_MCAST_PROMISC_BITS, 0);
3731 }
3732 }
3733
3734 finish:
3735 clear_bit(ICE_CFG_BUSY, vsi->state);
3736
3737 return ret;
3738 }
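/* Summary of the promisc rule bookkeeping in the two VLAN ndo callbacks
 * above, applicable while IFF_ALLMULTI is set:
 *
 *   non-zero VLAN count goes 0 -> 1 : VLAN-0 rule PROMISC      -> PROMISC_VLAN
 *   non-zero VLAN count goes 1 -> 0 : VLAN-0 rule PROMISC_VLAN -> PROMISC
 */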
3739
3740 /**
3741 * ice_rep_indr_tc_block_unbind - clean up indirect TC block private data
3742 * @cb_priv: indirection block private data
3743 */
3744 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3745 {
3746 struct ice_indr_block_priv *indr_priv = cb_priv;
3747
3748 list_del(&indr_priv->list);
3749 kfree(indr_priv);
3750 }
3751
3752 /**
3753 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3754 * @vsi: VSI struct which has the netdev
3755 */
3756 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3757 {
3758 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3759
3760 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3761 ice_rep_indr_tc_block_unbind);
3762 }
3763
3764 /**
3765 * ice_tc_indir_block_register - Register TC indirect block notifications
3766 * @vsi: VSI struct which has the netdev
3767 *
3768 * Returns 0 on success, negative value on failure
3769 */
3770 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3771 {
3772 struct ice_netdev_priv *np;
3773
3774 if (!vsi || !vsi->netdev)
3775 return -EINVAL;
3776
3777 np = netdev_priv(vsi->netdev);
3778
3779 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3780 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3781 }
3782
3783 /**
3784 * ice_get_avail_q_count - Get count of available (unused) queues
3785 * @pf_qmap: bitmap to get queue use count from
3786 * @lock: pointer to a mutex that protects access to pf_qmap
3787 * @size: size of the bitmap
3788 */
3789 static u16
3790 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3791 {
3792 unsigned long bit;
3793 u16 count = 0;
3794
3795 mutex_lock(lock);
3796 for_each_clear_bit(bit, pf_qmap, size)
3797 count++;
3798 mutex_unlock(lock);
3799
3800 return count;
3801 }
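/* Equivalently, the loop above counts the clear bits in the map, which
 * could also be expressed via the kernel's bitmap helpers; a sketch:
 */
#if 0	/* illustrative only */
	mutex_lock(lock);
	count = size - bitmap_weight(pf_qmap, size);
	mutex_unlock(lock);
#endif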
3802
3803 /**
3804 * ice_get_avail_txq_count - Get count of available Tx queues
3805 * @pf: pointer to an ice_pf instance
3806 */
3807 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3808 {
3809 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3810 pf->max_pf_txqs);
3811 }
3812
3813 /**
3814 * ice_get_avail_rxq_count - Get count of available Rx queues
3815 * @pf: pointer to an ice_pf instance
3816 */
3817 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3818 {
3819 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3820 pf->max_pf_rxqs);
3821 }
3822
3823 /**
3824 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3825 * @pf: board private structure to deinitialize
3826 */
3827 static void ice_deinit_pf(struct ice_pf *pf)
3828 {
3829 ice_service_task_stop(pf);
3830 mutex_destroy(&pf->lag_mutex);
3831 mutex_destroy(&pf->adev_mutex);
3832 mutex_destroy(&pf->sw_mutex);
3833 mutex_destroy(&pf->tc_mutex);
3834 mutex_destroy(&pf->avail_q_mutex);
3835 mutex_destroy(&pf->vfs.table_lock);
3836
3837 if (pf->avail_txqs) {
3838 bitmap_free(pf->avail_txqs);
3839 pf->avail_txqs = NULL;
3840 }
3841
3842 if (pf->avail_rxqs) {
3843 bitmap_free(pf->avail_rxqs);
3844 pf->avail_rxqs = NULL;
3845 }
3846
3847 if (pf->ptp.clock)
3848 ptp_clock_unregister(pf->ptp.clock);
3849 }
3850
3851 /**
3852 * ice_set_pf_caps - set PF's capability flags
3853 * @pf: pointer to the PF instance
3854 */
3855 static void ice_set_pf_caps(struct ice_pf *pf)
3856 {
3857 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3858
3859 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3860 if (func_caps->common_cap.rdma)
3861 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3862 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3863 if (func_caps->common_cap.dcb)
3864 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3865 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3866 if (func_caps->common_cap.sr_iov_1_1) {
3867 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3868 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3869 ICE_MAX_SRIOV_VFS);
3870 }
3871 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3872 if (func_caps->common_cap.rss_table_size)
3873 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3874
3875 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3876 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3877 u16 unused;
3878
3879 /* ctrl_vsi_idx will be set to a valid value when flow director
3880 * is setup by ice_init_fdir
3881 */
3882 pf->ctrl_vsi_idx = ICE_NO_VSI;
3883 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3884 /* force guaranteed filter pool for PF */
3885 ice_alloc_fd_guar_item(&pf->hw, &unused,
3886 func_caps->fd_fltr_guar);
3887 /* force shared filter pool for PF */
3888 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3889 func_caps->fd_fltr_best_effort);
3890 }
3891
3892 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3893 if (func_caps->common_cap.ieee_1588)
3894 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3895
3896 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3897 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3898 }
3899
3900 /**
3901 * ice_init_pf - Initialize general software structures (struct ice_pf)
3902 * @pf: board private structure to initialize
3903 */
3904 static int ice_init_pf(struct ice_pf *pf)
3905 {
3906 ice_set_pf_caps(pf);
3907
3908 mutex_init(&pf->sw_mutex);
3909 mutex_init(&pf->tc_mutex);
3910 mutex_init(&pf->adev_mutex);
3911 mutex_init(&pf->lag_mutex);
3912
3913 INIT_HLIST_HEAD(&pf->aq_wait_list);
3914 spin_lock_init(&pf->aq_wait_lock);
3915 init_waitqueue_head(&pf->aq_wait_queue);
3916
3917 init_waitqueue_head(&pf->reset_wait_queue);
3918
3919 /* setup service timer and periodic service task */
3920 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3921 pf->serv_tmr_period = HZ;
3922 INIT_WORK(&pf->serv_task, ice_service_task);
3923 clear_bit(ICE_SERVICE_SCHED, pf->state);
3924
3925 mutex_init(&pf->avail_q_mutex);
3926 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3927 if (!pf->avail_txqs)
3928 return -ENOMEM;
3929
3930 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3931 if (!pf->avail_rxqs) {
3932 bitmap_free(pf->avail_txqs);
3933 pf->avail_txqs = NULL;
3934 return -ENOMEM;
3935 }
3936
3937 mutex_init(&pf->vfs.table_lock);
3938 hash_init(pf->vfs.table);
3939 ice_mbx_init_snapshot(&pf->hw);
3940
3941 return 0;
3942 }
3943
3944 /**
3945 * ice_is_wol_supported - check if WoL is supported
3946 * @hw: pointer to hardware info
3947 *
3948 * Check if WoL is supported based on the HW configuration.
3949 * Returns true if NVM supports and enables WoL for this port, false otherwise
3950 */
3951 bool ice_is_wol_supported(struct ice_hw *hw)
3952 {
3953 u16 wol_ctrl;
3954
3955 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3956 * word) indicates WoL is not supported on the corresponding PF ID.
3957 */
3958 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3959 return false;
3960
3961 return !(BIT(hw->port_info->lport) & wol_ctrl);
3962 }
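/* Worked example with a made-up NVM word: if wol_ctrl reads 0x0005 (bits 0
 * and 2 set), then lport 0 and lport 2 report WoL as unsupported while
 * lport 1 reports it as supported, since a set bit disables WoL for that
 * logical port.
 */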
3963
3964 /**
3965 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3966 * @vsi: VSI being changed
3967 * @new_rx: new number of Rx queues
3968 * @new_tx: new number of Tx queues
3969 * @locked: is adev device_lock held
3970 *
3971 * Only change the number of queues if new_tx or new_rx is non-zero.
3972 *
3973 * Returns 0 on success.
3974 */
3975 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
3976 {
3977 struct ice_pf *pf = vsi->back;
3978 int i, err = 0, timeout = 50;
3979
3980 if (!new_rx && !new_tx)
3981 return -EINVAL;
3982
3983 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3984 timeout--;
3985 if (!timeout)
3986 return -EBUSY;
3987 usleep_range(1000, 2000);
3988 }
3989
3990 if (new_tx)
3991 vsi->req_txq = (u16)new_tx;
3992 if (new_rx)
3993 vsi->req_rxq = (u16)new_rx;
3994
3995 /* set for the next time the netdev is started */
3996 if (!netif_running(vsi->netdev)) {
3997 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3998 if (err)
3999 goto rebuild_err;
4000 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4001 goto done;
4002 }
4003
4004 ice_vsi_close(vsi);
4005 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4006 if (err)
4007 goto rebuild_err;
4008
4009 ice_for_each_traffic_class(i) {
4010 if (vsi->tc_cfg.ena_tc & BIT(i))
4011 netdev_set_tc_queue(vsi->netdev,
4012 vsi->tc_cfg.tc_info[i].netdev_tc,
4013 vsi->tc_cfg.tc_info[i].qcount_tx,
4014 vsi->tc_cfg.tc_info[i].qoffset);
4015 }
4016 ice_pf_dcb_recfg(pf, locked);
4017 ice_vsi_open(vsi);
4018 goto done;
4019
4020 rebuild_err:
4021 dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
4022 err);
4023 done:
4024 clear_bit(ICE_CFG_BUSY, pf->state);
4025 return err;
4026 }
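/* A minimal caller sketch (an assumed shape, not the actual ethtool hook):
 * the ethtool .set_channels path would translate the user's channel request
 * into new queue counts and pass them here, e.g.:
 */
#if 0	/* illustrative only */
	err = ice_vsi_recfg_qs(vsi, ch->combined_count, ch->combined_count,
			       locked);
#endif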
4027
4028 /**
4029 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4030 * @pf: PF to configure
4031 *
4032 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4033 * VSI can still Tx/Rx VLAN tagged packets.
4034 */
4035 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4036 {
4037 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4038 struct ice_vsi_ctx *ctxt;
4039 struct ice_hw *hw;
4040 int status;
4041
4042 if (!vsi)
4043 return;
4044
4045 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4046 if (!ctxt)
4047 return;
4048
4049 hw = &pf->hw;
4050 ctxt->info = vsi->info;
4051
4052 ctxt->info.valid_sections =
4053 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4054 ICE_AQ_VSI_PROP_SECURITY_VALID |
4055 ICE_AQ_VSI_PROP_SW_VALID);
4056
4057 /* disable VLAN anti-spoof */
4058 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4059 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4060
4061 /* disable VLAN pruning and keep all other settings */
4062 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4063
4064 /* allow all VLANs on Tx and don't strip on Rx */
4065 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4066 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4067
4068 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4069 if (status) {
4070 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4071 status, ice_aq_str(hw->adminq.sq_last_status));
4072 } else {
4073 vsi->info.sec_flags = ctxt->info.sec_flags;
4074 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4075 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4076 }
4077
4078 kfree(ctxt);
4079 }
4080
4081 /**
4082 * ice_log_pkg_init - log result of DDP package load
4083 * @hw: pointer to hardware info
4084 * @state: state of package load
4085 */
4086 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4087 {
4088 struct ice_pf *pf = hw->back;
4089 struct device *dev;
4090
4091 dev = ice_pf_to_dev(pf);
4092
4093 switch (state) {
4094 case ICE_DDP_PKG_SUCCESS:
4095 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4096 hw->active_pkg_name,
4097 hw->active_pkg_ver.major,
4098 hw->active_pkg_ver.minor,
4099 hw->active_pkg_ver.update,
4100 hw->active_pkg_ver.draft);
4101 break;
4102 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4103 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4104 hw->active_pkg_name,
4105 hw->active_pkg_ver.major,
4106 hw->active_pkg_ver.minor,
4107 hw->active_pkg_ver.update,
4108 hw->active_pkg_ver.draft);
4109 break;
4110 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4111 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4112 hw->active_pkg_name,
4113 hw->active_pkg_ver.major,
4114 hw->active_pkg_ver.minor,
4115 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4116 break;
4117 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4118 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4119 hw->active_pkg_name,
4120 hw->active_pkg_ver.major,
4121 hw->active_pkg_ver.minor,
4122 hw->active_pkg_ver.update,
4123 hw->active_pkg_ver.draft,
4124 hw->pkg_name,
4125 hw->pkg_ver.major,
4126 hw->pkg_ver.minor,
4127 hw->pkg_ver.update,
4128 hw->pkg_ver.draft);
4129 break;
4130 case ICE_DDP_PKG_FW_MISMATCH:
4131 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering Safe Mode.\n");
4132 break;
4133 case ICE_DDP_PKG_INVALID_FILE:
4134 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4135 break;
4136 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4137 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4138 break;
4139 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4140 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4141 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4142 break;
4143 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4144 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4145 break;
4146 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4147 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4148 break;
4149 case ICE_DDP_PKG_LOAD_ERROR:
4150 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4151 /* poll for reset to complete */
4152 if (ice_check_reset(hw))
4153 dev_err(dev, "Error resetting device. Please reload the driver\n");
4154 break;
4155 case ICE_DDP_PKG_ERR:
4156 default:
4157 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4158 break;
4159 }
4160 }
4161
4162 /**
4163 * ice_load_pkg - load/reload the DDP Package file
4164 * @firmware: firmware structure when firmware requested or NULL for reload
4165 * @pf: pointer to the PF instance
4166 *
4167 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4168 * initialize HW tables.
4169 */
4170 static void
4171 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4172 {
4173 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4174 struct device *dev = ice_pf_to_dev(pf);
4175 struct ice_hw *hw = &pf->hw;
4176
4177 /* Load DDP Package */
4178 if (firmware && !hw->pkg_copy) {
4179 state = ice_copy_and_init_pkg(hw, firmware->data,
4180 firmware->size);
4181 ice_log_pkg_init(hw, state);
4182 } else if (!firmware && hw->pkg_copy) {
4183 /* Reload package during rebuild after CORER/GLOBR reset */
4184 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4185 ice_log_pkg_init(hw, state);
4186 } else {
4187 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4188 }
4189
4190 if (!ice_is_init_pkg_successful(state)) {
4191 /* Safe Mode */
4192 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4193 return;
4194 }
4195
4196 /* Successful download package is the precondition for advanced
4197 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4198 */
4199 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4200 }
4201
4202 /**
4203 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4204 * @pf: pointer to the PF structure
4205 *
4206 * There is no error returned here because the driver should be able to handle
4207 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4208 * specifically with Tx.
4209 */
4210 static void ice_verify_cacheline_size(struct ice_pf *pf)
4211 {
4212 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4213 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4214 ICE_CACHE_LINE_BYTES);
4215 }
4216
4217 /**
4218 * ice_send_version - update firmware with driver version
4219 * @pf: PF struct
4220 *
4221 * Returns 0 on success, else error code
4222 */
4223 static int ice_send_version(struct ice_pf *pf)
4224 {
4225 struct ice_driver_ver dv;
4226
4227 dv.major_ver = 0xff;
4228 dv.minor_ver = 0xff;
4229 dv.build_ver = 0xff;
4230 dv.subbuild_ver = 0;
4231 strscpy((char *)dv.driver_string, UTS_RELEASE,
4232 sizeof(dv.driver_string));
4233 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4234 }
4235
4236 /**
4237 * ice_init_fdir - Initialize flow director VSI and configuration
4238 * @pf: pointer to the PF instance
4239 *
4240 * Returns 0 on success, negative on error
4241 */
4242 static int ice_init_fdir(struct ice_pf *pf)
4243 {
4244 struct device *dev = ice_pf_to_dev(pf);
4245 struct ice_vsi *ctrl_vsi;
4246 int err;
4247
4248 /* Side Band Flow Director needs to have a control VSI.
4249 * Allocate it and store it in the PF.
4250 */
4251 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4252 if (!ctrl_vsi) {
4253 dev_dbg(dev, "could not create control VSI\n");
4254 return -ENOMEM;
4255 }
4256
4257 err = ice_vsi_open_ctrl(ctrl_vsi);
4258 if (err) {
4259 dev_dbg(dev, "could not open control VSI\n");
4260 goto err_vsi_open;
4261 }
4262
4263 mutex_init(&pf->hw.fdir_fltr_lock);
4264
4265 err = ice_fdir_create_dflt_rules(pf);
4266 if (err)
4267 goto err_fdir_rule;
4268
4269 return 0;
4270
4271 err_fdir_rule:
4272 ice_fdir_release_flows(&pf->hw);
4273 ice_vsi_close(ctrl_vsi);
4274 err_vsi_open:
4275 ice_vsi_release(ctrl_vsi);
4276 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4277 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4278 pf->ctrl_vsi_idx = ICE_NO_VSI;
4279 }
4280 return err;
4281 }
4282
4283 static void ice_deinit_fdir(struct ice_pf *pf)
4284 {
4285 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4286
4287 if (!vsi)
4288 return;
4289
4290 ice_vsi_manage_fdir(vsi, false);
4291 ice_vsi_release(vsi);
4292 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4293 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4294 pf->ctrl_vsi_idx = ICE_NO_VSI;
4295 }
4296
4297 mutex_destroy(&pf->hw.fdir_fltr_lock);
4298 }
4299
4300 /**
4301 * ice_get_opt_fw_name - return optional firmware file name or NULL
4302 * @pf: pointer to the PF instance
4303 */
4304 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4305 {
4306 /* The optional firmware name is the same as the default, with an extra
4307 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4308 */
4309 struct pci_dev *pdev = pf->pdev;
4310 char *opt_fw_filename;
4311 u64 dsn;
4312
4313 /* Determine the name of the optional file using the DSN (two
4314 * dwords following the start of the DSN Capability).
4315 */
4316 dsn = pci_get_dsn(pdev);
4317 if (!dsn)
4318 return NULL;
4319
4320 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4321 if (!opt_fw_filename)
4322 return NULL;
4323
4324 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4325 ICE_DDP_PKG_PATH, dsn);
4326
4327 return opt_fw_filename;
4328 }
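/* For example, a device whose DSN reads 0x1122334455667788 (a made-up
 * value) would first look for "intel/ice/ddp/ice-1122334455667788.pkg"
 * before falling back to the default ice.pkg.
 */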
4329
4330 /**
4331 * ice_request_fw - request and load the DDP package file
4332 * @pf: pointer to the PF instance
4333 */
4334 static void ice_request_fw(struct ice_pf *pf)
4335 {
4336 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4337 const struct firmware *firmware = NULL;
4338 struct device *dev = ice_pf_to_dev(pf);
4339 int err = 0;
4340
4341 /* optional device-specific DDP (if present) overrides the default DDP
4342 * package file. The kernel logs a debug message if the file doesn't exist,
4343 * and warning messages for other errors.
4344 */
4345 if (opt_fw_filename) {
4346 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4347 if (err) {
4348 kfree(opt_fw_filename);
4349 goto dflt_pkg_load;
4350 }
4351
4352 /* request for firmware was successful. Download to device */
4353 ice_load_pkg(firmware, pf);
4354 kfree(opt_fw_filename);
4355 release_firmware(firmware);
4356 return;
4357 }
4358
4359 dflt_pkg_load:
4360 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4361 if (err) {
4362 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4363 return;
4364 }
4365
4366 /* request for firmware was successful. Download to device */
4367 ice_load_pkg(firmware, pf);
4368 release_firmware(firmware);
4369 }
4370
4371 /**
4372 * ice_print_wake_reason - show the wake up cause in the log
4373 * @pf: pointer to the PF struct
4374 */
4375 static void ice_print_wake_reason(struct ice_pf *pf)
4376 {
4377 u32 wus = pf->wakeup_reason;
4378 const char *wake_str;
4379
4380 /* if no wake event, nothing to print */
4381 if (!wus)
4382 return;
4383
4384 if (wus & PFPM_WUS_LNKC_M)
4385 wake_str = "Link\n";
4386 else if (wus & PFPM_WUS_MAG_M)
4387 wake_str = "Magic Packet\n";
4388 else if (wus & PFPM_WUS_MNG_M)
4389 wake_str = "Management\n";
4390 else if (wus & PFPM_WUS_FW_RST_WK_M)
4391 wake_str = "Firmware Reset\n";
4392 else
4393 wake_str = "Unknown\n";
4394
4395 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4396 }
4397
4398 /**
4399 * ice_register_netdev - register netdev
4400 * @vsi: pointer to the VSI struct
4401 */
4402 static int ice_register_netdev(struct ice_vsi *vsi)
4403 {
4404 int err;
4405
4406 if (!vsi || !vsi->netdev)
4407 return -EIO;
4408
4409 err = register_netdev(vsi->netdev);
4410 if (err)
4411 return err;
4412
4413 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
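/* start administratively down: carrier off and all Tx queues stopped
 * until the VSI is opened and link is reported up
 */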
4414 netif_carrier_off(vsi->netdev);
4415 netif_tx_stop_all_queues(vsi->netdev);
4416
4417 return 0;
4418 }
4419
4420 static void ice_unregister_netdev(struct ice_vsi *vsi)
4421 {
4422 if (!vsi || !vsi->netdev)
4423 return;
4424
4425 unregister_netdev(vsi->netdev);
4426 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4427 }
4428
4429 /**
4430 * ice_cfg_netdev - Allocate and configure a netdev
4431 * @vsi: the VSI associated with the new netdev
4432 *
4433 * Returns 0 on success, negative value on failure
4434 */
4435 static int ice_cfg_netdev(struct ice_vsi *vsi)
4436 {
4437 struct ice_netdev_priv *np;
4438 struct net_device *netdev;
4439 u8 mac_addr[ETH_ALEN];
4440
4441 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4442 vsi->alloc_rxq);
4443 if (!netdev)
4444 return -ENOMEM;
4445
4446 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4447 vsi->netdev = netdev;
4448 np = netdev_priv(netdev);
4449 np->vsi = vsi;
4450
4451 ice_set_netdev_features(netdev);
4452 ice_set_ops(vsi);
4453
4454 if (vsi->type == ICE_VSI_PF) {
4455 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4456 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4457 eth_hw_addr_set(netdev, mac_addr);
4458 }
4459
4460 netdev->priv_flags |= IFF_UNICAST_FLT;
4461
4462 /* Setup netdev TC information */
4463 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4464
4465 netdev->max_mtu = ICE_MAX_MTU;
4466
4467 return 0;
4468 }
4469
4470 static void ice_decfg_netdev(struct ice_vsi *vsi)
4471 {
4472 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4473 free_netdev(vsi->netdev);
4474 vsi->netdev = NULL;
4475 }
4476
4477 static int ice_start_eth(struct ice_vsi *vsi)
4478 {
4479 int err;
4480
4481 err = ice_init_mac_fltr(vsi->back);
4482 if (err)
4483 return err;
4484
4485 err = ice_vsi_open(vsi);
4486 if (err)
4487 ice_fltr_remove_all(vsi);
4488
4489 return err;
4490 }
4491
4492 static void ice_stop_eth(struct ice_vsi *vsi)
4493 {
4494 ice_fltr_remove_all(vsi);
4495 ice_vsi_close(vsi);
4496 }
4497
4498 static int ice_init_eth(struct ice_pf *pf)
4499 {
4500 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4501 int err;
4502
4503 if (!vsi)
4504 return -EINVAL;
4505
4506 /* init channel list */
4507 INIT_LIST_HEAD(&vsi->ch_list);
4508
4509 err = ice_cfg_netdev(vsi);
4510 if (err)
4511 return err;
4512 /* Setup DCB netlink interface */
4513 ice_dcbnl_setup(vsi);
4514
4515 err = ice_init_mac_fltr(pf);
4516 if (err)
4517 goto err_init_mac_fltr;
4518
4519 err = ice_devlink_create_pf_port(pf);
4520 if (err)
4521 goto err_devlink_create_pf_port;
4522
4523 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4524
4525 err = ice_register_netdev(vsi);
4526 if (err)
4527 goto err_register_netdev;
4528
4529 err = ice_tc_indir_block_register(vsi);
4530 if (err)
4531 goto err_tc_indir_block_register;
4532
4533 ice_napi_add(vsi);
4534
4535 return 0;
4536
4537 err_tc_indir_block_register:
4538 ice_unregister_netdev(vsi);
4539 err_register_netdev:
4540 ice_devlink_destroy_pf_port(pf);
4541 err_devlink_create_pf_port:
4542 err_init_mac_fltr:
4543 ice_decfg_netdev(vsi);
4544 return err;
4545 }
4546
4547 static void ice_deinit_eth(struct ice_pf *pf)
4548 {
4549 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4550
4551 if (!vsi)
4552 return;
4553
4554 ice_vsi_close(vsi);
4555 ice_unregister_netdev(vsi);
4556 ice_devlink_destroy_pf_port(pf);
4557 ice_tc_indir_block_unregister(vsi);
4558 ice_decfg_netdev(vsi);
4559 }
4560
4561 /**
4562 * ice_wait_for_fw - wait for full FW readiness
4563 * @hw: pointer to the hardware structure
4564 * @timeout: milliseconds that can elapse before timing out
4565 */
4566 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4567 {
4568 int fw_loading;
4569 u32 elapsed = 0;
4570
4571 while (elapsed <= timeout) {
4572 fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4573
4574 /* firmware was not yet loaded, we have to wait more */
4575 if (fw_loading) {
4576 elapsed += 100;
4577 msleep(100);
4578 continue;
4579 }
4580 return 0;
4581 }
4582
4583 return -ETIMEDOUT;
4584 }
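/* Timing note: the loop above polls GL_MNG_FWSM every 100 ms, so the
 * 30000 ms timeout used by the C827 path below allows roughly 300 polls
 * before giving up with -ETIMEDOUT.
 */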
4585
4586 static int ice_init_dev(struct ice_pf *pf)
4587 {
4588 struct device *dev = ice_pf_to_dev(pf);
4589 struct ice_hw *hw = &pf->hw;
4590 int err;
4591
4592 err = ice_init_hw(hw);
4593 if (err) {
4594 dev_err(dev, "ice_init_hw failed: %d\n", err);
4595 return err;
4596 }
4597
4598 /* Some cards require longer initialization times due to the
4599 * necessity of loading FW from an external source.
4600 * This can take up to half a minute.
4601 */
4602 if (ice_is_pf_c827(hw)) {
4603 err = ice_wait_for_fw(hw, 30000);
4604 if (err) {
4605 dev_err(dev, "ice_wait_for_fw timed out\n");
4606 return err;
4607 }
4608 }
4609
4610 ice_init_feature_support(pf);
4611
4612 ice_request_fw(pf);
4613
4614 /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4615 * set in pf->flags, which will cause ice_is_safe_mode to return
4616 * true
4617 */
4618 if (ice_is_safe_mode(pf)) {
4619 /* we already got function/device capabilities but these don't
4620 * reflect what the driver needs to do in safe mode. Instead of
4621 * adding conditional logic everywhere to ignore these
4622 * device/function capabilities, override them.
4623 */
4624 ice_set_safe_mode_caps(hw);
4625 }
4626
4627 err = ice_init_pf(pf);
4628 if (err) {
4629 dev_err(dev, "ice_init_pf failed: %d\n", err);
4630 goto err_init_pf;
4631 }
4632
4633 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4634 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4635 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4636 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4637 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4638 pf->hw.udp_tunnel_nic.tables[0].n_entries =
4639 pf->hw.tnl.valid_count[TNL_VXLAN];
4640 pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4641 UDP_TUNNEL_TYPE_VXLAN;
4642 }
4643 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4644 pf->hw.udp_tunnel_nic.tables[1].n_entries =
4645 pf->hw.tnl.valid_count[TNL_GENEVE];
4646 pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4647 UDP_TUNNEL_TYPE_GENEVE;
4648 }
4649
4650 err = ice_init_interrupt_scheme(pf);
4651 if (err) {
4652 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4653 err = -EIO;
4654 goto err_init_interrupt_scheme;
4655 }
4656
4657 /* In case of MSIX we are going to setup the misc vector right here
4658 * to handle admin queue events etc. In case of legacy and MSI
4659 * the misc functionality and queue processing is combined in
4660 * the same vector and that gets setup at open.
4661 */
4662 err = ice_req_irq_msix_misc(pf);
4663 if (err) {
4664 dev_err(dev, "setup of misc vector failed: %d\n", err);
4665 goto err_req_irq_msix_misc;
4666 }
4667
4668 return 0;
4669
4670 err_req_irq_msix_misc:
4671 ice_clear_interrupt_scheme(pf);
4672 err_init_interrupt_scheme:
4673 ice_deinit_pf(pf);
4674 err_init_pf:
4675 ice_deinit_hw(hw);
4676 return err;
4677 }
4678
4679 static void ice_deinit_dev(struct ice_pf *pf)
4680 {
4681 ice_free_irq_msix_misc(pf);
4682 ice_deinit_pf(pf);
4683 ice_deinit_hw(&pf->hw);
4684
4685 /* Service task is already stopped, so call reset directly. */
4686 ice_reset(&pf->hw, ICE_RESET_PFR);
4687 pci_wait_for_pending_transaction(pf->pdev);
4688 ice_clear_interrupt_scheme(pf);
4689 }
4690
4691 static void ice_init_features(struct ice_pf *pf)
4692 {
4693 struct device *dev = ice_pf_to_dev(pf);
4694
4695 if (ice_is_safe_mode(pf))
4696 return;
4697
4698 /* initialize DDP driven features */
4699 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4700 ice_ptp_init(pf);
4701
4702 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4703 ice_gnss_init(pf);
4704
4705 /* Note: Flow director init failure is non-fatal to load */
4706 if (ice_init_fdir(pf))
4707 dev_err(dev, "could not initialize flow director\n");
4708
4709 /* Note: DCB init failure is non-fatal to load */
4710 if (ice_init_pf_dcb(pf, false)) {
4711 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4712 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4713 } else {
4714 ice_cfg_lldp_mib_change(&pf->hw, true);
4715 }
4716
4717 if (ice_init_lag(pf))
4718 dev_warn(dev, "Failed to init link aggregation support\n");
4719 }
4720
4721 static void ice_deinit_features(struct ice_pf *pf)
4722 {
4723 if (ice_is_safe_mode(pf))
4724 return;
4725
4726 ice_deinit_lag(pf);
4727 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4728 ice_cfg_lldp_mib_change(&pf->hw, false);
4729 ice_deinit_fdir(pf);
4730 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4731 ice_gnss_exit(pf);
4732 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4733 ice_ptp_release(pf);
4734 }
4735
4736 static void ice_init_wakeup(struct ice_pf *pf)
4737 {
4738 /* Save wakeup reason register for later use */
4739 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4740
4741 /* check for a power management event */
4742 ice_print_wake_reason(pf);
4743
4744 /* clear wake status, all bits */
4745 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4746
4747 /* Disable WoL at init, wait for user to enable */
4748 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4749 }
4750
4751 static int ice_init_link(struct ice_pf *pf)
4752 {
4753 struct device *dev = ice_pf_to_dev(pf);
4754 int err;
4755
4756 err = ice_init_link_events(pf->hw.port_info);
4757 if (err) {
4758 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4759 return err;
4760 }
4761
4762 /* not a fatal error if this fails */
4763 err = ice_init_nvm_phy_type(pf->hw.port_info);
4764 if (err)
4765 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4766
4767 /* not a fatal error if this fails */
4768 err = ice_update_link_info(pf->hw.port_info);
4769 if (err)
4770 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4771
4772 ice_init_link_dflt_override(pf->hw.port_info);
4773
4774 ice_check_link_cfg_err(pf,
4775 pf->hw.port_info->phy.link_info.link_cfg_err);
4776
4777 /* if media available, initialize PHY settings */
4778 if (pf->hw.port_info->phy.link_info.link_info &
4779 ICE_AQ_MEDIA_AVAILABLE) {
4780 /* not a fatal error if this fails */
4781 err = ice_init_phy_user_cfg(pf->hw.port_info);
4782 if (err)
4783 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4784
4785 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4786 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4787
4788 if (vsi)
4789 ice_configure_phy(vsi);
4790 }
4791 } else {
4792 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4793 }
4794
4795 return err;
4796 }
4797
4798 static int ice_init_pf_sw(struct ice_pf *pf)
4799 {
4800 bool dvm = ice_is_dvm_ena(&pf->hw);
4801 struct ice_vsi *vsi;
4802 int err;
4803
4804 /* create switch struct for the switch element created by FW on boot */
4805 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4806 if (!pf->first_sw)
4807 return -ENOMEM;
4808
4809 if (pf->hw.evb_veb)
4810 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4811 else
4812 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4813
4814 pf->first_sw->pf = pf;
4815
4816 /* record the sw_id available for later use */
4817 pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4818
4819 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4820 if (err)
4821 goto err_aq_set_port_params;
4822
4823 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4824 if (!vsi) {
4825 err = -ENOMEM;
4826 goto err_pf_vsi_setup;
4827 }
4828
4829 return 0;
4830
4831 err_pf_vsi_setup:
4832 err_aq_set_port_params:
4833 kfree(pf->first_sw);
4834 return err;
4835 }
4836
4837 static void ice_deinit_pf_sw(struct ice_pf *pf)
4838 {
4839 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4840
4841 if (!vsi)
4842 return;
4843
4844 ice_vsi_release(vsi);
4845 kfree(pf->first_sw);
4846 }
4847
4848 static int ice_alloc_vsis(struct ice_pf *pf)
4849 {
4850 struct device *dev = ice_pf_to_dev(pf);
4851
4852 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4853 if (!pf->num_alloc_vsi)
4854 return -EIO;
4855
4856 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4857 dev_warn(dev,
4858 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4859 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4860 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4861 }
4862
4863 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4864 GFP_KERNEL);
4865 if (!pf->vsi)
4866 return -ENOMEM;
4867
4868 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4869 sizeof(*pf->vsi_stats), GFP_KERNEL);
4870 if (!pf->vsi_stats) {
4871 devm_kfree(dev, pf->vsi);
4872 return -ENOMEM;
4873 }
4874
4875 return 0;
4876 }
4877
4878 static void ice_dealloc_vsis(struct ice_pf *pf)
4879 {
4880 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4881 pf->vsi_stats = NULL;
4882
4883 pf->num_alloc_vsi = 0;
4884 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4885 pf->vsi = NULL;
4886 }
4887
4888 static int ice_init_devlink(struct ice_pf *pf)
4889 {
4890 int err;
4891
4892 err = ice_devlink_register_params(pf);
4893 if (err)
4894 return err;
4895
4896 ice_devlink_init_regions(pf);
4897 ice_devlink_register(pf);
4898
4899 return 0;
4900 }
4901
4902 static void ice_deinit_devlink(struct ice_pf *pf)
4903 {
4904 ice_devlink_unregister(pf);
4905 ice_devlink_destroy_regions(pf);
4906 ice_devlink_unregister_params(pf);
4907 }
4908
4909 static int ice_init(struct ice_pf *pf)
4910 {
4911 int err;
4912
4913 err = ice_init_dev(pf);
4914 if (err)
4915 return err;
4916
4917 err = ice_alloc_vsis(pf);
4918 if (err)
4919 goto err_alloc_vsis;
4920
4921 err = ice_init_pf_sw(pf);
4922 if (err)
4923 goto err_init_pf_sw;
4924
4925 ice_init_wakeup(pf);
4926
4927 err = ice_init_link(pf);
4928 if (err)
4929 goto err_init_link;
4930
4931 err = ice_send_version(pf);
4932 if (err)
4933 goto err_init_link;
4934
4935 ice_verify_cacheline_size(pf);
4936
4937 if (ice_is_safe_mode(pf))
4938 ice_set_safe_mode_vlan_cfg(pf);
4939 else
4940 /* print PCI link speed and width */
4941 pcie_print_link_status(pf->pdev);
4942
4943 /* ready to go, so clear down state bit */
4944 clear_bit(ICE_DOWN, pf->state);
4945 clear_bit(ICE_SERVICE_DIS, pf->state);
4946
4947 /* since everything is good, start the service timer */
4948 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4949
4950 return 0;
4951
4952 err_init_link:
4953 ice_deinit_pf_sw(pf);
4954 err_init_pf_sw:
4955 ice_dealloc_vsis(pf);
4956 err_alloc_vsis:
4957 ice_deinit_dev(pf);
4958 return err;
4959 }
4960
4961 static void ice_deinit(struct ice_pf *pf)
4962 {
4963 set_bit(ICE_SERVICE_DIS, pf->state);
4964 set_bit(ICE_DOWN, pf->state);
4965
4966 ice_deinit_pf_sw(pf);
4967 ice_dealloc_vsis(pf);
4968 ice_deinit_dev(pf);
4969 }
4970
4971 /**
4972 * ice_load - load the PF by initializing HW and starting the VSI
4973 * @pf: pointer to the PF instance
4974 */
4975 int ice_load(struct ice_pf *pf)
4976 {
4977 struct ice_vsi_cfg_params params = {};
4978 struct ice_vsi *vsi;
4979 int err;
4980
4981 err = ice_init_dev(pf);
4982 if (err)
4983 return err;
4984
4985 vsi = ice_get_main_vsi(pf);
4986
4987 params = ice_vsi_to_params(vsi);
4988 params.flags = ICE_VSI_FLAG_INIT;
4989
4990 rtnl_lock();
4991 err = ice_vsi_cfg(vsi, &params);
4992 if (err)
4993 goto err_vsi_cfg;
4994
4995 err = ice_start_eth(ice_get_main_vsi(pf));
4996 if (err)
4997 goto err_start_eth;
4998 rtnl_unlock();
4999
5000 err = ice_init_rdma(pf);
5001 if (err)
5002 goto err_init_rdma;
5003
5004 ice_init_features(pf);
5005 ice_service_task_restart(pf);
5006
5007 clear_bit(ICE_DOWN, pf->state);
5008
5009 return 0;
5010
5011 err_init_rdma:
5012 ice_vsi_close(ice_get_main_vsi(pf));
5013 rtnl_lock();
5014 err_start_eth:
5015 ice_vsi_decfg(ice_get_main_vsi(pf));
5016 err_vsi_cfg:
5017 rtnl_unlock();
5018 ice_deinit_dev(pf);
5019 return err;
5020 }
5021
5022 /**
5023 * ice_unload - unload the PF by stopping the VSI and deinitializing HW
5024 * @pf: pointer to the PF instance
5025 */
5026 void ice_unload(struct ice_pf *pf)
5027 {
5028 ice_deinit_features(pf);
5029 ice_deinit_rdma(pf);
5030 rtnl_lock();
5031 ice_stop_eth(ice_get_main_vsi(pf));
5032 ice_vsi_decfg(ice_get_main_vsi(pf));
5033 rtnl_unlock();
5034 ice_deinit_dev(pf);
5035 }
5036
5037 /**
5038 * ice_probe - Device initialization routine
5039 * @pdev: PCI device information struct
5040 * @ent: entry in ice_pci_tbl
5041 *
5042 * Returns 0 on success, negative on failure
5043 */
5044 static int
5045 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5046 {
5047 struct device *dev = &pdev->dev;
5048 struct ice_pf *pf;
5049 struct ice_hw *hw;
5050 int err;
5051
5052 if (pdev->is_virtfn) {
5053 dev_err(dev, "can't probe a virtual function\n");
5054 return -EINVAL;
5055 }
5056
5057 /* when under a kdump kernel initiate a reset before enabling the
5058 * device in order to clear out any pending DMA transactions. These
5059 * transactions can cause some systems to machine check when doing
5060 * the pcim_enable_device() below.
5061 */
5062 if (is_kdump_kernel()) {
5063 pci_save_state(pdev);
5064 pci_clear_master(pdev);
5065 err = pcie_flr(pdev);
5066 if (err)
5067 return err;
5068 pci_restore_state(pdev);
5069 }
5070
5071 /* this driver uses devres, see
5072 * Documentation/driver-api/driver-model/devres.rst
5073 */
5074 err = pcim_enable_device(pdev);
5075 if (err)
5076 return err;
5077
5078 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5079 if (err) {
5080 dev_err(dev, "BAR0 I/O map error %d\n", err);
5081 return err;
5082 }
5083
5084 pf = ice_allocate_pf(dev);
5085 if (!pf)
5086 return -ENOMEM;
5087
5088 /* initialize Auxiliary index to invalid value */
5089 pf->aux_idx = -1;
5090
5091 /* set up for high or low DMA */
5092 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5093 if (err) {
5094 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5095 return err;
5096 }
5097
5098 pci_set_master(pdev);
5099
5100 pf->pdev = pdev;
5101 pci_set_drvdata(pdev, pf);
5102 set_bit(ICE_DOWN, pf->state);
5103 /* Disable service task until DOWN bit is cleared */
5104 set_bit(ICE_SERVICE_DIS, pf->state);
5105
5106 hw = &pf->hw;
5107 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5108 pci_save_state(pdev);
5109
5110 hw->back = pf;
5111 hw->port_info = NULL;
5112 hw->vendor_id = pdev->vendor;
5113 hw->device_id = pdev->device;
5114 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5115 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5116 hw->subsystem_device_id = pdev->subsystem_device;
5117 hw->bus.device = PCI_SLOT(pdev->devfn);
5118 hw->bus.func = PCI_FUNC(pdev->devfn);
5119 ice_set_ctrlq_len(hw);
5120
5121 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5122
5123 #ifndef CONFIG_DYNAMIC_DEBUG
5124 if (debug < -1)
5125 hw->debug_mask = debug;
5126 #endif
5127
5128 err = ice_init(pf);
5129 if (err)
5130 goto err_init;
5131
5132 err = ice_init_eth(pf);
5133 if (err)
5134 goto err_init_eth;
5135
5136 err = ice_init_rdma(pf);
5137 if (err)
5138 goto err_init_rdma;
5139
5140 err = ice_init_devlink(pf);
5141 if (err)
5142 goto err_init_devlink;
5143
5144 ice_init_features(pf);
5145
5146 return 0;
5147
5148 err_init_devlink:
5149 ice_deinit_rdma(pf);
5150 err_init_rdma:
5151 ice_deinit_eth(pf);
5152 err_init_eth:
5153 ice_deinit(pf);
5154 err_init:
5155 pci_disable_device(pdev);
5156 return err;
5157 }
5158
5159 /**
5160 * ice_set_wake - enable or disable Wake on LAN
5161 * @pf: pointer to the PF struct
5162 *
5163 * Simple helper for WoL control
5164 */
5165 static void ice_set_wake(struct ice_pf *pf)
5166 {
5167 struct ice_hw *hw = &pf->hw;
5168 bool wol = pf->wol_ena;
5169
5170 /* clear wake state, otherwise new wake events won't fire */
5171 wr32(hw, PFPM_WUS, U32_MAX);
5172
5173 /* enable / disable APM wake up, no RMW needed */
5174 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5175
5176 /* enable / disable the magic packet filter */
5177 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5178 }
5179
5180 /**
5181 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5182 * @pf: pointer to the PF struct
5183 *
5184 * Issue firmware command to enable multicast magic wake, making
5185 * sure that any locally administered address (LAA) is used for
5186 * wake, and that PF reset doesn't undo the LAA.
5187 */
5188 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5189 {
5190 struct device *dev = ice_pf_to_dev(pf);
5191 struct ice_hw *hw = &pf->hw;
5192 u8 mac_addr[ETH_ALEN];
5193 struct ice_vsi *vsi;
5194 int status;
5195 u8 flags;
5196
5197 if (!pf->wol_ena)
5198 return;
5199
5200 vsi = ice_get_main_vsi(pf);
5201 if (!vsi)
5202 return;
5203
5204 /* Get current MAC address in case it's an LAA */
5205 if (vsi->netdev)
5206 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5207 else
5208 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5209
5210 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5211 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5212 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5213
5214 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5215 if (status)
5216 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5217 status, ice_aq_str(hw->adminq.sq_last_status));
5218 }
5219
5220 /**
5221 * ice_remove - Device removal routine
5222 * @pdev: PCI device information struct
5223 */
5224 static void ice_remove(struct pci_dev *pdev)
5225 {
5226 struct ice_pf *pf = pci_get_drvdata(pdev);
5227 int i;
5228
5229 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5230 if (!ice_is_reset_in_progress(pf->state))
5231 break;
5232 msleep(100);
5233 }
5234
5235 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5236 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5237 ice_free_vfs(pf);
5238 }
5239
5240 ice_service_task_stop(pf);
5241 ice_aq_cancel_waiting_tasks(pf);
5242 set_bit(ICE_DOWN, pf->state);
5243
5244 if (!ice_is_safe_mode(pf))
5245 ice_remove_arfs(pf);
5246 ice_deinit_features(pf);
5247 ice_deinit_devlink(pf);
5248 ice_deinit_rdma(pf);
5249 ice_deinit_eth(pf);
5250 ice_deinit(pf);
5251
5252 ice_vsi_release_all(pf);
5253
5254 ice_setup_mc_magic_wake(pf);
5255 ice_set_wake(pf);
5256
5257 pci_disable_device(pdev);
5258 }
5259
5260 /**
5261 * ice_shutdown - PCI callback for shutting down device
5262 * @pdev: PCI device information struct
5263 */
5264 static void ice_shutdown(struct pci_dev *pdev)
5265 {
5266 struct ice_pf *pf = pci_get_drvdata(pdev);
5267
5268 ice_remove(pdev);
5269
5270 if (system_state == SYSTEM_POWER_OFF) {
5271 pci_wake_from_d3(pdev, pf->wol_ena);
5272 pci_set_power_state(pdev, PCI_D3hot);
5273 }
5274 }
5275
5276 #ifdef CONFIG_PM
5277 /**
5278 * ice_prepare_for_shutdown - prep for PCI shutdown
5279 * @pf: board private structure
5280 *
5281 * Inform or close all dependent features in prep for PCI device shutdown
5282 */
5283 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5284 {
5285 struct ice_hw *hw = &pf->hw;
5286 u32 v;
5287
5288 /* Notify VFs of impending reset */
5289 if (ice_check_sq_alive(hw, &hw->mailboxq))
5290 ice_vc_notify_reset(pf);
5291
5292 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5293
5294 /* disable the VSIs and their queues that are not already DOWN */
5295 ice_pf_dis_all_vsi(pf, false);
5296
5297 ice_for_each_vsi(pf, v)
5298 if (pf->vsi[v])
5299 pf->vsi[v]->vsi_num = 0;
5300
5301 ice_shutdown_all_ctrlq(hw);
5302 }
5303
5304 /**
5305 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5306 * @pf: board private structure to reinitialize
5307 *
5308 * This routine reinitializes the interrupt scheme that was cleared during
5309 * the power management suspend callback.
5310 *
5311 * This should be called during the resume routine to re-allocate the
5312 * q_vectors and reacquire interrupts.
5313 */
5314 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5315 {
5316 struct device *dev = ice_pf_to_dev(pf);
5317 int ret, v;
5318
5319 /* Since we clear MSIX flag during suspend, we need to
5320 * set it back during resume...
5321 */
5322
5323 ret = ice_init_interrupt_scheme(pf);
5324 if (ret) {
5325 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5326 return ret;
5327 }
5328
5329 /* Remap vectors and rings, after successful re-init interrupts */
5330 ice_for_each_vsi(pf, v) {
5331 if (!pf->vsi[v])
5332 continue;
5333
5334 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5335 if (ret)
5336 goto err_reinit;
5337 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5338 }
5339
5340 ret = ice_req_irq_msix_misc(pf);
5341 if (ret) {
5342 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5343 ret);
5344 goto err_reinit;
5345 }
5346
5347 return 0;
5348
5349 err_reinit:
5350 while (v--)
5351 if (pf->vsi[v])
5352 ice_vsi_free_q_vectors(pf->vsi[v]);
5353
5354 return ret;
5355 }
5356
5357 /**
5358 * ice_suspend - PM callback for suspend
5359 * @dev: generic device information structure
5360 *
5361 * Power Management callback to quiesce the device and prepare
5362 * for D3 transition.
5363 */
5364 static int __maybe_unused ice_suspend(struct device *dev)
5365 {
5366 struct pci_dev *pdev = to_pci_dev(dev);
5367 struct ice_pf *pf;
5368 int disabled, v;
5369
5370 pf = pci_get_drvdata(pdev);
5371
5372 if (!ice_pf_state_is_nominal(pf)) {
5373 dev_err(dev, "Device is not ready, no need to suspend it\n");
5374 return -EBUSY;
5375 }
5376
5377 /* Stop watchdog tasks until resume completion.
5378 * Even though it is most likely that the service task is
5379 * disabled if the device is suspended or down, the service task's
5380 * state is controlled by a different state bit, and we should
5381 * store and honor whatever state that bit is in at this point.
5382 */
5383 disabled = ice_service_task_stop(pf);
5384
5385 ice_deinit_rdma(pf);
5386
5387 /* Already suspended? Then there is nothing to do */
5388 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5389 if (!disabled)
5390 ice_service_task_restart(pf);
5391 return 0;
5392 }
5393
5394 if (test_bit(ICE_DOWN, pf->state) ||
5395 ice_is_reset_in_progress(pf->state)) {
5396 dev_err(dev, "can't suspend device in reset or already down\n");
5397 if (!disabled)
5398 ice_service_task_restart(pf);
5399 return 0;
5400 }
5401
5402 ice_setup_mc_magic_wake(pf);
5403
5404 ice_prepare_for_shutdown(pf);
5405
5406 ice_set_wake(pf);
5407
5408 /* Free vectors, clear the interrupt scheme and release IRQs
5409 * for proper hibernation, especially with large number of CPUs.
5410 * Otherwise hibernation might fail when mapping all the vectors back
5411 * to CPU0.
5412 */
5413 ice_free_irq_msix_misc(pf);
5414 ice_for_each_vsi(pf, v) {
5415 if (!pf->vsi[v])
5416 continue;
5417 ice_vsi_free_q_vectors(pf->vsi[v]);
5418 }
5419 ice_clear_interrupt_scheme(pf);
5420
5421 pci_save_state(pdev);
5422 pci_wake_from_d3(pdev, pf->wol_ena);
5423 pci_set_power_state(pdev, PCI_D3hot);
5424 return 0;
5425 }
5426
5427 /**
5428 * ice_resume - PM callback for waking up from D3
5429 * @dev: generic device information structure
5430 */
5431 static int __maybe_unused ice_resume(struct device *dev)
5432 {
5433 struct pci_dev *pdev = to_pci_dev(dev);
5434 enum ice_reset_req reset_type;
5435 struct ice_pf *pf;
5436 struct ice_hw *hw;
5437 int ret;
5438
5439 pci_set_power_state(pdev, PCI_D0);
5440 pci_restore_state(pdev);
5441 pci_save_state(pdev);
5442
5443 if (!pci_device_is_present(pdev))
5444 return -ENODEV;
5445
5446 ret = pci_enable_device_mem(pdev);
5447 if (ret) {
5448 dev_err(dev, "Cannot enable device after suspend\n");
5449 return ret;
5450 }
5451
5452 pf = pci_get_drvdata(pdev);
5453 hw = &pf->hw;
5454
5455 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5456 ice_print_wake_reason(pf);
5457
5458 /* We cleared the interrupt scheme when we suspended, so we need to
5459 * restore it now to resume device functionality.
5460 */
5461 ret = ice_reinit_interrupt_scheme(pf);
5462 if (ret)
5463 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5464
5465 ret = ice_init_rdma(pf);
5466 if (ret)
5467 dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5468 ret);
5469
5470 clear_bit(ICE_DOWN, pf->state);
5471 /* Now perform PF reset and rebuild */
5472 reset_type = ICE_RESET_PFR;
5473 /* re-enable service task for reset, but allow reset to schedule it */
5474 clear_bit(ICE_SERVICE_DIS, pf->state);
5475
5476 if (ice_schedule_reset(pf, reset_type))
5477 dev_err(dev, "Reset during resume failed.\n");
5478
5479 clear_bit(ICE_SUSPENDED, pf->state);
5480 ice_service_task_restart(pf);
5481
5482 /* Restart the service task */
5483 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5484
5485 return 0;
5486 }
5487 #endif /* CONFIG_PM */
5488
5489 /**
5490 * ice_pci_err_detected - warning that PCI error has been detected
5491 * @pdev: PCI device information struct
5492 * @err: the type of PCI error
5493 *
5494 * Called to warn that something happened on the PCI bus and the error handling
5495 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5496 */
5497 static pci_ers_result_t
5498 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5499 {
5500 struct ice_pf *pf = pci_get_drvdata(pdev);
5501
5502 if (!pf) {
5503 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5504 __func__, err);
5505 return PCI_ERS_RESULT_DISCONNECT;
5506 }
5507
5508 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5509 ice_service_task_stop(pf);
5510
5511 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5512 set_bit(ICE_PFR_REQ, pf->state);
5513 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5514 }
5515 }
5516
5517 return PCI_ERS_RESULT_NEED_RESET;
5518 }
5519
5520 /**
5521 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5522 * @pdev: PCI device information struct
5523 *
5524 * Called to determine if the driver can recover from the PCI slot reset by
5525 * using a register read to determine if the device is recoverable.
5526 */
5527 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5528 {
5529 struct ice_pf *pf = pci_get_drvdata(pdev);
5530 pci_ers_result_t result;
5531 int err;
5532 u32 reg;
5533
5534 err = pci_enable_device_mem(pdev);
5535 if (err) {
5536 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5537 err);
5538 result = PCI_ERS_RESULT_DISCONNECT;
5539 } else {
5540 pci_set_master(pdev);
5541 pci_restore_state(pdev);
5542 pci_save_state(pdev);
5543 pci_wake_from_d3(pdev, false);
5544
5545 /* Check for life */
5546 reg = rd32(&pf->hw, GLGEN_RTRIG);
5547 if (!reg)
5548 result = PCI_ERS_RESULT_RECOVERED;
5549 else
5550 result = PCI_ERS_RESULT_DISCONNECT;
5551 }
5552
5553 return result;
5554 }
5555
5556 /**
5557 * ice_pci_err_resume - restart operations after PCI error recovery
5558 * @pdev: PCI device information struct
5559 *
5560 * Called to allow the driver to bring things back up after PCI error and/or
5561 * reset recovery have finished
5562 */
5563 static void ice_pci_err_resume(struct pci_dev *pdev)
5564 {
5565 struct ice_pf *pf = pci_get_drvdata(pdev);
5566
5567 if (!pf) {
5568 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5569 __func__);
5570 return;
5571 }
5572
5573 if (test_bit(ICE_SUSPENDED, pf->state)) {
5574 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5575 __func__);
5576 return;
5577 }
5578
5579 ice_restore_all_vfs_msi_state(pdev);
5580
5581 ice_do_reset(pf, ICE_RESET_PFR);
5582 ice_service_task_restart(pf);
5583 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5584 }
5585
5586 /**
5587 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5588 * @pdev: PCI device information struct
5589 */
5590 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5591 {
5592 struct ice_pf *pf = pci_get_drvdata(pdev);
5593
5594 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5595 ice_service_task_stop(pf);
5596
5597 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5598 set_bit(ICE_PFR_REQ, pf->state);
5599 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5600 }
5601 }
5602 }
5603
5604 /**
5605 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5606 * @pdev: PCI device information struct
5607 */
5608 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5609 {
5610 ice_pci_err_resume(pdev);
5611 }
5612
5613 /* ice_pci_tbl - PCI Device ID Table
5614 *
5615 * Wildcard entries (PCI_ANY_ID) should come last
5616 * Last entry must be all 0s
5617 *
5618 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5619 * Class, Class Mask, private data (not used) }
5620 */
5621 static const struct pci_device_id ice_pci_tbl[] = {
5622 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5623 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5624 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5625 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5626 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5627 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5628 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5629 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5630 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5631 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5632 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5633 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5634 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5635 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5636 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5637 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5638 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5639 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5640 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5641 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5642 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5643 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5644 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5645 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5646 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5647 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5648 /* required last entry */
5649 { 0, }
5650 };
5651 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5652
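/* SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) declares a struct
 * dev_pm_ops that routes suspend_fn/resume_fn to every system sleep
 * hook (suspend/resume, freeze/thaw, poweroff/restore), so ice_suspend
 * and ice_resume also service hibernation. A rough sketch of the
 * expansion (see include/linux/pm.h for the authoritative definition):
 *
 *	const struct dev_pm_ops ice_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ice_suspend, ice_resume)
 *	};
 */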
5653 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5654
5655 static const struct pci_error_handlers ice_pci_err_handler = {
5656 .error_detected = ice_pci_err_detected,
5657 .slot_reset = ice_pci_err_slot_reset,
5658 .reset_prepare = ice_pci_err_reset_prepare,
5659 .reset_done = ice_pci_err_reset_done,
5660 .resume = ice_pci_err_resume
5661 };
5662
5663 static struct pci_driver ice_driver = {
5664 .name = KBUILD_MODNAME,
5665 .id_table = ice_pci_tbl,
5666 .probe = ice_probe,
5667 .remove = ice_remove,
5668 #ifdef CONFIG_PM
5669 .driver.pm = &ice_pm_ops,
5670 #endif /* CONFIG_PM */
5671 .shutdown = ice_shutdown,
5672 .sriov_configure = ice_sriov_configure,
5673 .err_handler = &ice_pci_err_handler
5674 };
5675
5676 /**
5677 * ice_module_init - Driver registration routine
5678 *
5679 * ice_module_init is the first routine called when the driver is
5680 * loaded. All it does is register with the PCI subsystem.
5681 */
5682 static int __init ice_module_init(void)
5683 {
5684 int status = -ENOMEM;
5685
5686 pr_info("%s\n", ice_driver_string);
5687 pr_info("%s\n", ice_copyright);
5688
5689 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5690 if (!ice_wq) {
5691 pr_err("Failed to create workqueue\n");
5692 return status;
5693 }
5694
5695 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5696 if (!ice_lag_wq) {
5697 pr_err("Failed to create LAG workqueue\n");
5698 goto err_dest_wq;
5699 }
5700
5701 status = pci_register_driver(&ice_driver);
5702 if (status) {
5703 pr_err("failed to register PCI driver, err %d\n", status);
5704 goto err_dest_lag_wq;
5705 }
5706
5707 return 0;
5708
5709 err_dest_lag_wq:
5710 destroy_workqueue(ice_lag_wq);
5711 err_dest_wq:
5712 destroy_workqueue(ice_wq);
5713 return status;
5714 }
5715 module_init(ice_module_init);
5716
5717 /**
5718 * ice_module_exit - Driver exit cleanup routine
5719 *
5720 * ice_module_exit is called just before the driver is removed
5721 * from memory.
5722 */
5723 static void __exit ice_module_exit(void)
5724 {
5725 pci_unregister_driver(&ice_driver);
5726 destroy_workqueue(ice_wq);
5727 destroy_workqueue(ice_lag_wq);
5728 pr_info("module unloaded\n");
5729 }
5730 module_exit(ice_module_exit);
5731
5732 /**
5733 * ice_set_mac_address - NDO callback to set MAC address
5734 * @netdev: network interface device structure
5735 * @pi: pointer to an address structure
5736 *
5737 * Returns 0 on success, negative on failure
5738 */
5739 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5740 {
5741 struct ice_netdev_priv *np = netdev_priv(netdev);
5742 struct ice_vsi *vsi = np->vsi;
5743 struct ice_pf *pf = vsi->back;
5744 struct ice_hw *hw = &pf->hw;
5745 struct sockaddr *addr = pi;
5746 u8 old_mac[ETH_ALEN];
5747 u8 flags = 0;
5748 u8 *mac;
5749 int err;
5750
5751 mac = (u8 *)addr->sa_data;
5752
5753 if (!is_valid_ether_addr(mac))
5754 return -EADDRNOTAVAIL;
5755
5756 if (test_bit(ICE_DOWN, pf->state) ||
5757 ice_is_reset_in_progress(pf->state)) {
5758 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5759 mac);
5760 return -EBUSY;
5761 }
5762
5763 if (ice_chnl_dmac_fltr_cnt(pf)) {
5764 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5765 mac);
5766 return -EAGAIN;
5767 }
5768
5769 netif_addr_lock_bh(netdev);
5770 ether_addr_copy(old_mac, netdev->dev_addr);
5771 /* change the netdev's MAC address */
5772 eth_hw_addr_set(netdev, mac);
5773 netif_addr_unlock_bh(netdev);
5774
5775 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5776 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5777 if (err && err != -ENOENT) {
5778 err = -EADDRNOTAVAIL;
5779 goto err_update_filters;
5780 }
5781
5782 /* Add filter for new MAC. If filter exists, return success */
5783 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5784 if (err == -EEXIST) {
5785 /* Although this MAC filter is already present in hardware it's
5786 * possible in some cases (e.g. bonding) that dev_addr was
5787 * modified outside of the driver and needs to be restored back
5788 * to this value.
5789 */
5790 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5791
5792 return 0;
5793 } else if (err) {
5794 /* error if the new filter addition failed */
5795 err = -EADDRNOTAVAIL;
5796 }
5797
5798 err_update_filters:
5799 if (err) {
5800 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5801 mac);
5802 netif_addr_lock_bh(netdev);
5803 eth_hw_addr_set(netdev, old_mac);
5804 netif_addr_unlock_bh(netdev);
5805 return err;
5806 }
5807
5808 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5809 netdev->dev_addr);
5810
5811 /* write new MAC address to the firmware */
5812 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5813 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5814 if (err) {
5815 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5816 mac, err);
5817 }
5818 return 0;
5819 }
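/* ice_set_mac_address is reached via the ndo_set_mac_address hook, for
 * example from userspace with (interface name and address assumed):
 *
 *	ip link set dev eth0 address 00:11:22:33:44:55
 *
 * The old unicast filter is removed, a filter for the new address is
 * added, and the new LAA is written to firmware so it can also be used
 * for wake-on-LAN.
 */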
5820
5821 /**
5822 * ice_set_rx_mode - NDO callback to set the netdev filters
5823 * @netdev: network interface device structure
5824 */
5825 static void ice_set_rx_mode(struct net_device *netdev)
5826 {
5827 struct ice_netdev_priv *np = netdev_priv(netdev);
5828 struct ice_vsi *vsi = np->vsi;
5829
5830 if (!vsi || ice_is_switchdev_running(vsi->back))
5831 return;
5832
5833 /* Set the flags to synchronize filters
5834 * ndo_set_rx_mode may be triggered even without a change in netdev
5835 * flags
5836 */
5837 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5838 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5839 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5840
5841 /* schedule our worker thread which will take care of
5842 * applying the new filter changes
5843 */
5844 ice_service_task_schedule(vsi->back);
5845 }
5846
5847 /**
5848 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5849 * @netdev: network interface device structure
5850 * @queue_index: Queue ID
5851 * @maxrate: maximum bandwidth in Mbps
5852 */
5853 static int
5854 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5855 {
5856 struct ice_netdev_priv *np = netdev_priv(netdev);
5857 struct ice_vsi *vsi = np->vsi;
5858 u16 q_handle;
5859 int status;
5860 u8 tc;
5861
5862 /* Validate maxrate requested is within permitted range */
5863 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5864 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5865 maxrate, queue_index);
5866 return -EINVAL;
5867 }
5868
5869 q_handle = vsi->tx_rings[queue_index]->q_handle;
5870 tc = ice_dcb_get_tc(vsi, queue_index);
5871
5872 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
5873 if (!vsi) {
5874 netdev_err(netdev, "Invalid VSI for given queue %d\n",
5875 queue_index);
5876 return -EINVAL;
5877 }
5878
5879 /* Set BW back to default, when user set maxrate to 0 */
5880 if (!maxrate)
5881 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5882 q_handle, ICE_MAX_BW);
5883 else
5884 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5885 q_handle, ICE_MAX_BW, maxrate * 1000);
5886 if (status)
5887 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5888 status);
5889
5890 return status;
5891 }
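/* ice_set_tx_maxrate backs the per-queue tx_maxrate attribute exposed
 * through sysfs. Illustrative usage (interface and queue assumed):
 *
 *	echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * caps Tx queue 0 at 1000 Mbps; writing 0 restores the default
 * (unlimited) queue bandwidth.
 */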
5892
5893 /**
5894 * ice_fdb_add - add an entry to the hardware database
5895 * @ndm: the input from the stack
5896 * @tb: pointer to array of nladdr (unused)
5897 * @dev: the net device pointer
5898 * @addr: the MAC address entry being added
5899 * @vid: VLAN ID
5900 * @flags: instructions from stack about fdb operation
5901 * @extack: netlink extended ack
5902 */
5903 static int
5904 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5905 struct net_device *dev, const unsigned char *addr, u16 vid,
5906 u16 flags, struct netlink_ext_ack __always_unused *extack)
5907 {
5908 int err;
5909
5910 if (vid) {
5911 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5912 return -EINVAL;
5913 }
5914 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5915 netdev_err(dev, "FDB only supports static addresses\n");
5916 return -EINVAL;
5917 }
5918
5919 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5920 err = dev_uc_add_excl(dev, addr);
5921 else if (is_multicast_ether_addr(addr))
5922 err = dev_mc_add_excl(dev, addr);
5923 else
5924 err = -EINVAL;
5925
5926 /* Only return duplicate errors if NLM_F_EXCL is set */
5927 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5928 err = 0;
5929
5930 return err;
5931 }
5932
5933 /**
5934 * ice_fdb_del - delete an entry from the hardware database
5935 * @ndm: the input from the stack
5936 * @tb: pointer to array of nladdr (unused)
5937 * @dev: the net device pointer
5938 * @addr: the MAC address entry being removed
5939 * @vid: VLAN ID
5940 * @extack: netlink extended ack
5941 */
5942 static int
5943 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5944 struct net_device *dev, const unsigned char *addr,
5945 __always_unused u16 vid, struct netlink_ext_ack *extack)
5946 {
5947 int err;
5948
5949 if (ndm->ndm_state & NUD_PERMANENT) {
5950 netdev_err(dev, "FDB only supports static addresses\n");
5951 return -EINVAL;
5952 }
5953
5954 if (is_unicast_ether_addr(addr))
5955 err = dev_uc_del(dev, addr);
5956 else if (is_multicast_ether_addr(addr))
5957 err = dev_mc_del(dev, addr);
5958 else
5959 err = -EINVAL;
5960
5961 return err;
5962 }
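/* ice_fdb_add/ice_fdb_del back the ndo_fdb_add/ndo_fdb_del hooks used
 * by the bridge tooling, e.g. (device and address assumed):
 *
 *	bridge fdb add 01:80:c2:00:00:0e dev eth0
 *	bridge fdb del 01:80:c2:00:00:0e dev eth0
 *
 * Only static entries without a VLAN ID are accepted; everything else
 * is rejected with -EINVAL above.
 */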
5963
5964 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5965 NETIF_F_HW_VLAN_CTAG_TX | \
5966 NETIF_F_HW_VLAN_STAG_RX | \
5967 NETIF_F_HW_VLAN_STAG_TX)
5968
5969 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5970 NETIF_F_HW_VLAN_STAG_RX)
5971
5972 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5973 NETIF_F_HW_VLAN_STAG_FILTER)
5974
5975 /**
5976 * ice_fix_features - fix the netdev features flags based on device limitations
5977 * @netdev: ptr to the netdev that flags are being fixed on
5978 * @features: features that need to be checked and possibly fixed
5979 *
5980 * Make sure any fixups are made to features in this callback. This enables the
5981 * driver to not have to check unsupported configurations throughout the driver
5982 * because that's the responsibility of this callback.
5983 *
5984 * Single VLAN Mode (SVM) Supported Features:
5985 * NETIF_F_HW_VLAN_CTAG_FILTER
5986 * NETIF_F_HW_VLAN_CTAG_RX
5987 * NETIF_F_HW_VLAN_CTAG_TX
5988 *
5989 * Double VLAN Mode (DVM) Supported Features:
5990 * NETIF_F_HW_VLAN_CTAG_FILTER
5991 * NETIF_F_HW_VLAN_CTAG_RX
5992 * NETIF_F_HW_VLAN_CTAG_TX
5993 *
5994 * NETIF_F_HW_VLAN_STAG_FILTER
5995 * NETIF_F_HW_VLAN_STAG_RX
5996 * NETIF_F_HW_VLAN_STAG_TX
5997 *
5998 * Features that need fixing:
5999 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6000 * These are mutually exclusive as the VSI context cannot support multiple
6001 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
6002 * is not done, then default to clearing the requested STAG offload
6003 * settings.
6004 *
6005 * All supported filtering has to be enabled or disabled together. For
6006 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6007 * together. If this is not done, then default to VLAN filtering disabled.
6008 * These are mutually exclusive as there is currently no way to
6009 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6010 * prune rules.
6011 */
6012 static netdev_features_t
6013 ice_fix_features(struct net_device *netdev, netdev_features_t features)
6014 {
6015 struct ice_netdev_priv *np = netdev_priv(netdev);
6016 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6017 bool cur_ctag, cur_stag, req_ctag, req_stag;
6018
6019 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6020 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6021 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6022
6023 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6024 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6025 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6026
6027 if (req_vlan_fltr != cur_vlan_fltr) {
6028 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6029 if (req_ctag && req_stag) {
6030 features |= NETIF_VLAN_FILTERING_FEATURES;
6031 } else if (!req_ctag && !req_stag) {
6032 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6033 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
6034 (!cur_stag && req_stag && !cur_ctag)) {
6035 features |= NETIF_VLAN_FILTERING_FEATURES;
6036 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6037 } else if ((cur_ctag && !req_ctag && cur_stag) ||
6038 (cur_stag && !req_stag && cur_ctag)) {
6039 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6040 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6041 }
6042 } else {
6043 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6044 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6045
6046 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6047 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6048 }
6049 }
6050
6051 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6052 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6053 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6054 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6055 NETIF_F_HW_VLAN_STAG_TX);
6056 }
6057
6058 if (!(netdev->features & NETIF_F_RXFCS) &&
6059 (features & NETIF_F_RXFCS) &&
6060 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6061 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6062 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6063 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6064 }
6065
6066 return features;
6067 }
6068
6069 /**
6070 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6071 * @vsi: PF's VSI
6072 * @features: features used to determine VLAN offload settings
6073 *
6074 * First, determine the vlan_ethertype based on the VLAN offload bits in
6075 * features. Then determine if stripping and insertion should be enabled or
6076 * disabled. Finally enable or disable VLAN stripping and insertion.
6077 */
6078 static int
6079 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6080 {
6081 bool enable_stripping = true, enable_insertion = true;
6082 struct ice_vsi_vlan_ops *vlan_ops;
6083 int strip_err = 0, insert_err = 0;
6084 u16 vlan_ethertype = 0;
6085
6086 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6087
6088 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6089 vlan_ethertype = ETH_P_8021AD;
6090 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6091 vlan_ethertype = ETH_P_8021Q;
6092
6093 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6094 enable_stripping = false;
6095 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6096 enable_insertion = false;
6097
6098 if (enable_stripping)
6099 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6100 else
6101 strip_err = vlan_ops->dis_stripping(vsi);
6102
6103 if (enable_insertion)
6104 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6105 else
6106 insert_err = vlan_ops->dis_insertion(vsi);
6107
6108 if (strip_err || insert_err)
6109 return -EIO;
6110
6111 return 0;
6112 }
6113
6114 /**
6115 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6116 * @vsi: PF's VSI
6117 * @features: features used to determine VLAN filtering settings
6118 *
6119 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6120 * features.
6121 */
6122 static int
6123 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6124 {
6125 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6126 int err = 0;
6127
6128 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6129 * if either bit is set
6130 */
6131 if (features &
6132 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6133 err = vlan_ops->ena_rx_filtering(vsi);
6134 else
6135 err = vlan_ops->dis_rx_filtering(vsi);
6136
6137 return err;
6138 }
6139
6140 /**
6141 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6142 * @netdev: ptr to the netdev being adjusted
6143 * @features: the feature set that the stack is suggesting
6144 *
6145 * Only update VLAN settings if the requested_vlan_features are different from
6146 * the current_vlan_features.
6147 */
6148 static int
6149 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6150 {
6151 netdev_features_t current_vlan_features, requested_vlan_features;
6152 struct ice_netdev_priv *np = netdev_priv(netdev);
6153 struct ice_vsi *vsi = np->vsi;
6154 int err;
6155
6156 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6157 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6158 if (current_vlan_features ^ requested_vlan_features) {
6159 if ((features & NETIF_F_RXFCS) &&
6160 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6161 dev_err(ice_pf_to_dev(vsi->back),
6162 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6163 return -EIO;
6164 }
6165
6166 err = ice_set_vlan_offload_features(vsi, features);
6167 if (err)
6168 return err;
6169 }
6170
6171 current_vlan_features = netdev->features &
6172 NETIF_VLAN_FILTERING_FEATURES;
6173 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6174 if (current_vlan_features ^ requested_vlan_features) {
6175 err = ice_set_vlan_filtering_features(vsi, features);
6176 if (err)
6177 return err;
6178 }
6179
6180 return 0;
6181 }
6182
6183 /**
6184 * ice_set_loopback - turn on/off loopback mode on underlying PF
6185 * @vsi: ptr to VSI
6186 * @ena: flag to indicate the on/off setting
6187 */
6188 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6189 {
6190 bool if_running = netif_running(vsi->netdev);
6191 int ret;
6192
6193 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6194 ret = ice_down(vsi);
6195 if (ret) {
6196 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6197 return ret;
6198 }
6199 }
6200 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6201 if (ret)
6202 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6203 if (if_running)
6204 ret = ice_up(vsi);
6205
6206 return ret;
6207 }
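/* NETIF_F_LOOPBACK is toggled through the netdev feature interface,
 * e.g. (interface name assumed):
 *
 *	ethtool -K eth0 loopback on
 *
 * which lands in ice_set_features() below and ends up here. The VSI is
 * taken down first so no traffic is in flight while the MAC loopback
 * admin queue command is issued, then brought back up.
 */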
6208
6209 /**
6210 * ice_set_features - set the netdev feature flags
6211 * @netdev: ptr to the netdev being adjusted
6212 * @features: the feature set that the stack is suggesting
6213 */
6214 static int
6215 ice_set_features(struct net_device *netdev, netdev_features_t features)
6216 {
6217 netdev_features_t changed = netdev->features ^ features;
6218 struct ice_netdev_priv *np = netdev_priv(netdev);
6219 struct ice_vsi *vsi = np->vsi;
6220 struct ice_pf *pf = vsi->back;
6221 int ret = 0;
6222
6223 /* Don't set any netdev advanced features with device in Safe Mode */
6224 if (ice_is_safe_mode(pf)) {
6225 dev_err(ice_pf_to_dev(pf),
6226 "Device is in Safe Mode - not enabling advanced netdev features\n");
6227 return ret;
6228 }
6229
6230 /* Do not change setting during reset */
6231 if (ice_is_reset_in_progress(pf->state)) {
6232 dev_err(ice_pf_to_dev(pf),
6233 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6234 return -EBUSY;
6235 }
6236
6237 /* Multiple features can be changed in one call so keep features in
6238 * separate if/else statements to guarantee each feature is checked
6239 */
6240 if (changed & NETIF_F_RXHASH)
6241 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6242
6243 ret = ice_set_vlan_features(netdev, features);
6244 if (ret)
6245 return ret;
6246
6247 /* Turn on receive of FCS aka CRC, and after setting this
6248 * flag the packet data will have the 4 byte CRC appended
6249 */
6250 if (changed & NETIF_F_RXFCS) {
6251 if ((features & NETIF_F_RXFCS) &&
6252 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6253 dev_err(ice_pf_to_dev(vsi->back),
6254 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6255 return -EIO;
6256 }
6257
6258 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6259 ret = ice_down_up(vsi);
6260 if (ret)
6261 return ret;
6262 }
6263
6264 if (changed & NETIF_F_NTUPLE) {
6265 bool ena = !!(features & NETIF_F_NTUPLE);
6266
6267 ice_vsi_manage_fdir(vsi, ena);
6268 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6269 }
6270
6271 /* don't turn off hw_tc_offload when ADQ is already enabled */
6272 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6273 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6274 return -EACCES;
6275 }
6276
6277 if (changed & NETIF_F_HW_TC) {
6278 bool ena = !!(features & NETIF_F_HW_TC);
6279
6280 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6281 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6282 }
6283
6284 if (changed & NETIF_F_LOOPBACK)
6285 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6286
6287 return ret;
6288 }
6289
6290 /**
6291 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6292 * @vsi: VSI to setup VLAN properties for
6293 */
6294 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6295 {
6296 int err;
6297
6298 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6299 if (err)
6300 return err;
6301
6302 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6303 if (err)
6304 return err;
6305
6306 return ice_vsi_add_vlan_zero(vsi);
6307 }
6308
6309 /**
6310 * ice_vsi_cfg_lan - Setup the VSI lan related config
6311 * @vsi: the VSI being configured
6312 *
6313 * Return 0 on success and negative value on error
6314 */
6315 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6316 {
6317 int err;
6318
6319 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6320 ice_set_rx_mode(vsi->netdev);
6321
6322 err = ice_vsi_vlan_setup(vsi);
6323 if (err)
6324 return err;
6325 }
6326 ice_vsi_cfg_dcb_rings(vsi);
6327
6328 err = ice_vsi_cfg_lan_txqs(vsi);
6329 if (!err && ice_is_xdp_ena_vsi(vsi))
6330 err = ice_vsi_cfg_xdp_txqs(vsi);
6331 if (!err)
6332 err = ice_vsi_cfg_rxqs(vsi);
6333
6334 return err;
6335 }
6336
6337 /* THEORY OF MODERATION:
6338 * The ice driver hardware works differently than the hardware that DIMLIB was
6339 * originally made for. ice hardware doesn't have packet count limits that
6340 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6341 * which is hard-coded to a limit of 250,000 ints/second.
6342 * If not using dynamic moderation, the INTRL value can be modified
6343 * by ethtool rx-usecs-high.
6344 */
6345 struct ice_dim {
6346 /* the throttle rate for interrupts, basically worst case delay before
6347 * an initial interrupt fires, value is stored in microseconds.
6348 */
6349 u16 itr;
6350 };
6351
6352 /* Make a different profile for Rx that doesn't allow quite so aggressive
6353 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6354 * second).
6355 */
6356 static const struct ice_dim rx_profile[] = {
6357 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6358 {8}, /* 125,000 ints/s */
6359 {16}, /* 62,500 ints/s */
6360 {62}, /* 16,129 ints/s */
6361 {126} /* 7,936 ints/s */
6362 };
6363
6364 /* The transmit profile, which has the same sorts of values
6365 * as the previous struct
6366 */
6367 static const struct ice_dim tx_profile[] = {
6368 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6369 {8}, /* 125,000 ints/s */
6370 {40}, /* 25,000 ints/s */
6371 {128}, /* 7,812 ints/s */
6372 {256} /* 3,906 ints/s */
6373 };
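/* The profile entries above are ITR values in microseconds; the
 * resulting interrupt rate is 1,000,000 / itr. As a worked check,
 * {8} throttles to 1,000,000 / 8 = 125,000 ints/s and {126} to
 * 1,000,000 / 126 = ~7,936 ints/s, matching the annotations above.
 */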
6374
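/**
 * ice_tx_dim_work - deferred net_dim callback for a Tx ring container
 * @work: the work_struct embedded in struct dim
 *
 * Scheduled by the DIM library when it decides the Tx moderation
 * profile should change; translates the chosen profile index into an
 * ITR value and writes it to hardware.
 */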
6375 static void ice_tx_dim_work(struct work_struct *work)
6376 {
6377 struct ice_ring_container *rc;
6378 struct dim *dim;
6379 u16 itr;
6380
6381 dim = container_of(work, struct dim, work);
6382 rc = dim->priv;
6383
6384 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6385
6386 /* look up the values in our local table */
6387 itr = tx_profile[dim->profile_ix].itr;
6388
6389 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6390 ice_write_itr(rc, itr);
6391
6392 dim->state = DIM_START_MEASURE;
6393 }
6394
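/**
 * ice_rx_dim_work - deferred net_dim callback for an Rx ring container
 * @work: the work_struct embedded in struct dim
 *
 * Scheduled by the DIM library when it decides the Rx moderation
 * profile should change; translates the chosen profile index into an
 * ITR value and writes it to hardware.
 */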
6395 static void ice_rx_dim_work(struct work_struct *work)
6396 {
6397 struct ice_ring_container *rc;
6398 struct dim *dim;
6399 u16 itr;
6400
6401 dim = container_of(work, struct dim, work);
6402 rc = dim->priv;
6403
6404 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6405
6406 /* look up the values in our local table */
6407 itr = rx_profile[dim->profile_ix].itr;
6408
6409 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6410 ice_write_itr(rc, itr);
6411
6412 dim->state = DIM_START_MEASURE;
6413 }
6414
6415 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6416
6417 /**
6418 * ice_init_moderation - set up interrupt moderation
6419 * @q_vector: the vector containing rings to be configured
6420 *
6421 * Set up interrupt moderation registers, with the intent to do the right thing
6422 * when called from reset or from probe, and whether or not dynamic moderation
6423 * is enabled or not. Take special care to write all the registers in both
6424 * dynamic moderation mode or not in order to make sure hardware is in a known
6425 * state.
6426 */
6427 static void ice_init_moderation(struct ice_q_vector *q_vector)
6428 {
6429 struct ice_ring_container *rc;
6430 bool tx_dynamic, rx_dynamic;
6431
6432 rc = &q_vector->tx;
6433 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6434 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6435 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6436 rc->dim.priv = rc;
6437 tx_dynamic = ITR_IS_DYNAMIC(rc);
6438
6439 /* set the initial TX ITR to match the above */
6440 ice_write_itr(rc, tx_dynamic ?
6441 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6442
6443 rc = &q_vector->rx;
6444 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6445 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6446 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6447 rc->dim.priv = rc;
6448 rx_dynamic = ITR_IS_DYNAMIC(rc);
6449
6450 /* set the initial RX ITR to match the above */
6451 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6452 rc->itr_setting);
6453
6454 ice_set_q_vector_intrl(q_vector);
6455 }
6456
6457 /**
6458 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6459 * @vsi: the VSI being configured
6460 */
6461 static void ice_napi_enable_all(struct ice_vsi *vsi)
6462 {
6463 int q_idx;
6464
6465 if (!vsi->netdev)
6466 return;
6467
6468 ice_for_each_q_vector(vsi, q_idx) {
6469 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6470
6471 ice_init_moderation(q_vector);
6472
6473 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6474 napi_enable(&q_vector->napi);
6475 }
6476 }
6477
6478 /**
6479 * ice_up_complete - Finish the last steps of bringing up a connection
6480 * @vsi: The VSI being configured
6481 *
6482 * Return 0 on success and negative value on error
6483 */
6484 static int ice_up_complete(struct ice_vsi *vsi)
6485 {
6486 struct ice_pf *pf = vsi->back;
6487 int err;
6488
6489 ice_vsi_cfg_msix(vsi);
6490
6491 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6492 * Tx queue group list was configured and the context bits were
6493 * programmed using ice_vsi_cfg_lan_txqs
6494 */
6495 err = ice_vsi_start_all_rx_rings(vsi);
6496 if (err)
6497 return err;
6498
6499 clear_bit(ICE_VSI_DOWN, vsi->state);
6500 ice_napi_enable_all(vsi);
6501 ice_vsi_ena_irq(vsi);
6502
6503 if (vsi->port_info &&
6504 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6505 vsi->netdev && vsi->type == ICE_VSI_PF) {
6506 ice_print_link_msg(vsi, true);
6507 netif_tx_start_all_queues(vsi->netdev);
6508 netif_carrier_on(vsi->netdev);
6509 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6510 }
6511
6512 /* Perform an initial read of the statistics registers now to
6513 * set the baseline so counters are ready when interface is up
6514 */
6515 ice_update_eth_stats(vsi);
6516
6517 if (vsi->type == ICE_VSI_PF)
6518 ice_service_task_schedule(pf);
6519
6520 return 0;
6521 }
6522
6523 /**
6524 * ice_up - Bring the connection back up after being down
6525 * @vsi: VSI being configured
6526 */
6527 int ice_up(struct ice_vsi *vsi)
6528 {
6529 int err;
6530
6531 err = ice_vsi_cfg_lan(vsi);
6532 if (!err)
6533 err = ice_up_complete(vsi);
6534
6535 return err;
6536 }
6537
6538 /**
6539 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6540 * @syncp: pointer to u64_stats_sync
6541 * @stats: stats that pkts and bytes count will be taken from
6542 * @pkts: packets stats counter
6543 * @bytes: bytes stats counter
6544 *
6545 * This function fetches stats from the ring considering the atomic operations
6546 * that need to be performed to read u64 values on a 32-bit machine.
6547 */
6548 void
6549 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6550 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6551 {
6552 unsigned int start;
6553
6554 do {
6555 start = u64_stats_fetch_begin(syncp);
6556 *pkts = stats.pkts;
6557 *bytes = stats.bytes;
6558 } while (u64_stats_fetch_retry(syncp, start));
6559 }
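/* A minimal usage sketch, assuming ring points at a Tx ring whose
 * ring_stats have been allocated:
 *
 *	u64 pkts, bytes;
 *
 *	ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
 *				     ring->ring_stats->stats,
 *				     &pkts, &bytes);
 *
 * The begin/retry loop rereads the counters until no writer updated
 * them mid-read; this only costs anything on 32-bit kernels, where a
 * u64 load is not atomic.
 */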
6560
6561 /**
6562 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6563 * @vsi: the VSI to be updated
6564 * @vsi_stats: the stats struct to be updated
6565 * @rings: rings to work on
6566 * @count: number of rings
6567 */
6568 static void
6569 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6570 struct rtnl_link_stats64 *vsi_stats,
6571 struct ice_tx_ring **rings, u16 count)
6572 {
6573 u16 i;
6574
6575 for (i = 0; i < count; i++) {
6576 struct ice_tx_ring *ring;
6577 u64 pkts = 0, bytes = 0;
6578
6579 ring = READ_ONCE(rings[i]);
6580 if (!ring || !ring->ring_stats)
6581 continue;
6582 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6583 ring->ring_stats->stats, &pkts,
6584 &bytes);
6585 vsi_stats->tx_packets += pkts;
6586 vsi_stats->tx_bytes += bytes;
6587 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6588 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6589 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6590 }
6591 }
6592
6593 /**
6594 * ice_update_vsi_ring_stats - Update VSI stats counters
6595 * @vsi: the VSI to be updated
6596 */
6597 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6598 {
6599 struct rtnl_link_stats64 *net_stats, *stats_prev;
6600 struct rtnl_link_stats64 *vsi_stats;
6601 struct ice_pf *pf = vsi->back;
6602 u64 pkts, bytes;
6603 int i;
6604
6605 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6606 if (!vsi_stats)
6607 return;
6608
6609 /* reset non-netdev (extended) stats */
6610 vsi->tx_restart = 0;
6611 vsi->tx_busy = 0;
6612 vsi->tx_linearize = 0;
6613 vsi->rx_buf_failed = 0;
6614 vsi->rx_page_failed = 0;
6615
6616 rcu_read_lock();
6617
6618 /* update Tx rings counters */
6619 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6620 vsi->num_txq);
6621
6622 /* update Rx rings counters */
6623 ice_for_each_rxq(vsi, i) {
6624 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6625 struct ice_ring_stats *ring_stats;
6626
6627 ring_stats = ring->ring_stats;
6628 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6629 ring_stats->stats, &pkts,
6630 &bytes);
6631 vsi_stats->rx_packets += pkts;
6632 vsi_stats->rx_bytes += bytes;
6633 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6634 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6635 }
6636
6637 /* update XDP Tx rings counters */
6638 if (ice_is_xdp_ena_vsi(vsi))
6639 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6640 vsi->num_xdp_txq);
6641
6642 rcu_read_unlock();
6643
6644 net_stats = &vsi->net_stats;
6645 stats_prev = &vsi->net_stats_prev;
6646
6647 /* Update netdev counters, but keep in mind that values could start at a
6648 * random value after PF reset. And as we increase the reported stat by
6649 * the diff of Cur - Prev, we need to be sure that Prev is valid. If it's
6650 * not, let's skip this round.
6651 */
6652 if (likely(pf->stat_prev_loaded)) {
6653 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6654 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6655 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6656 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6657 }
6658
6659 stats_prev->tx_packets = vsi_stats->tx_packets;
6660 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6661 stats_prev->rx_packets = vsi_stats->rx_packets;
6662 stats_prev->rx_bytes = vsi_stats->rx_bytes;
6663
6664 kfree(vsi_stats);
6665 }
6666
6667 /**
6668 * ice_update_vsi_stats - Update VSI stats counters
6669 * @vsi: the VSI to be updated
6670 */
6671 void ice_update_vsi_stats(struct ice_vsi *vsi)
6672 {
6673 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6674 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6675 struct ice_pf *pf = vsi->back;
6676
6677 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6678 test_bit(ICE_CFG_BUSY, pf->state))
6679 return;
6680
6681 /* get stats as recorded by Tx/Rx rings */
6682 ice_update_vsi_ring_stats(vsi);
6683
6684 /* get VSI stats as recorded by the hardware */
6685 ice_update_eth_stats(vsi);
6686
6687 cur_ns->tx_errors = cur_es->tx_errors;
6688 cur_ns->rx_dropped = cur_es->rx_discards;
6689 cur_ns->tx_dropped = cur_es->tx_discards;
6690 cur_ns->multicast = cur_es->rx_multicast;
6691
6692 /* update some more netdev stats if this is main VSI */
6693 if (vsi->type == ICE_VSI_PF) {
6694 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6695 cur_ns->rx_errors = pf->stats.crc_errors +
6696 pf->stats.illegal_bytes +
6697 pf->stats.rx_len_errors +
6698 pf->stats.rx_undersize +
6699 pf->hw_csum_rx_error +
6700 pf->stats.rx_jabber +
6701 pf->stats.rx_fragments +
6702 pf->stats.rx_oversize;
6703 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6704 /* record drops from the port level */
6705 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6706 }
6707 }
6708
6709 /**
6710 * ice_update_pf_stats - Update PF port stats counters
6711 * @pf: PF whose stats need to be updated
6712 */
6713 void ice_update_pf_stats(struct ice_pf *pf)
6714 {
6715 struct ice_hw_port_stats *prev_ps, *cur_ps;
6716 struct ice_hw *hw = &pf->hw;
6717 u16 fd_ctr_base;
6718 u8 port;
6719
6720 port = hw->port_info->lport;
6721 prev_ps = &pf->stats_prev;
6722 cur_ps = &pf->stats;
6723
6724 if (ice_is_reset_in_progress(pf->state))
6725 pf->stat_prev_loaded = false;
6726
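/* ice_stat_update40()/ice_stat_update32() read free-running 40-bit or
 * 32-bit hardware counters, add the (rollover-aware) delta since the
 * previous read to the cur counter, and store the raw reading in prev.
 * When stat_prev_loaded is false the first reading only seeds prev, so
 * the counters reported to the stack count up from zero.
 */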
6727 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6728 &prev_ps->eth.rx_bytes,
6729 &cur_ps->eth.rx_bytes);
6730
6731 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6732 &prev_ps->eth.rx_unicast,
6733 &cur_ps->eth.rx_unicast);
6734
6735 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6736 &prev_ps->eth.rx_multicast,
6737 &cur_ps->eth.rx_multicast);
6738
6739 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6740 &prev_ps->eth.rx_broadcast,
6741 &cur_ps->eth.rx_broadcast);
6742
6743 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6744 &prev_ps->eth.rx_discards,
6745 &cur_ps->eth.rx_discards);
6746
6747 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6748 &prev_ps->eth.tx_bytes,
6749 &cur_ps->eth.tx_bytes);
6750
6751 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6752 &prev_ps->eth.tx_unicast,
6753 &cur_ps->eth.tx_unicast);
6754
6755 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6756 &prev_ps->eth.tx_multicast,
6757 &cur_ps->eth.tx_multicast);
6758
6759 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6760 &prev_ps->eth.tx_broadcast,
6761 &cur_ps->eth.tx_broadcast);
6762
6763 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6764 &prev_ps->tx_dropped_link_down,
6765 &cur_ps->tx_dropped_link_down);
6766
6767 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6768 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6769
6770 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6771 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6772
6773 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6774 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6775
6776 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6777 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6778
6779 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6780 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6781
6782 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6783 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6784
6785 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6786 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6787
6788 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6789 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6790
6791 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6792 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6793
6794 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6795 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6796
6797 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6798 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6799
6800 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6801 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6802
6803 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6804 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6805
6806 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6807 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6808
6809 fd_ctr_base = hw->fd_ctr_base;
6810
6811 ice_stat_update40(hw,
6812 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6813 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6814 &cur_ps->fd_sb_match);
6815 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6816 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6817
6818 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6819 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6820
6821 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6822 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6823
6824 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6825 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6826
6827 ice_update_dcb_stats(pf);
6828
6829 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6830 &prev_ps->crc_errors, &cur_ps->crc_errors);
6831
6832 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6833 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6834
6835 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6836 &prev_ps->mac_local_faults,
6837 &cur_ps->mac_local_faults);
6838
6839 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6840 &prev_ps->mac_remote_faults,
6841 &cur_ps->mac_remote_faults);
6842
6843 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6844 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6845
6846 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6847 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6848
6849 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6850 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6851
6852 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6853 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6854
6855 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6856 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6857
6858 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6859
6860 pf->stat_prev_loaded = true;
6861 }
6862
6863 /**
6864 * ice_get_stats64 - get statistics for network device structure
6865 * @netdev: network interface device structure
6866 * @stats: main device statistics structure
6867 */
6868 static
6869 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6870 {
6871 struct ice_netdev_priv *np = netdev_priv(netdev);
6872 struct rtnl_link_stats64 *vsi_stats;
6873 struct ice_vsi *vsi = np->vsi;
6874
6875 vsi_stats = &vsi->net_stats;
6876
6877 if (!vsi->num_txq || !vsi->num_rxq)
6878 return;
6879
6880 /* netdev packet/byte stats come from ring counters. These are obtained
6881 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6882 * But, only call the update routine and read the registers if VSI is
6883 * not down.
6884 */
6885 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6886 ice_update_vsi_ring_stats(vsi);
6887 stats->tx_packets = vsi_stats->tx_packets;
6888 stats->tx_bytes = vsi_stats->tx_bytes;
6889 stats->rx_packets = vsi_stats->rx_packets;
6890 stats->rx_bytes = vsi_stats->rx_bytes;
6891
6892 /* The rest of the stats can be read from the hardware but instead we
6893 * just return values that the watchdog task has already obtained from
6894 * the hardware.
6895 */
6896 stats->multicast = vsi_stats->multicast;
6897 stats->tx_errors = vsi_stats->tx_errors;
6898 stats->tx_dropped = vsi_stats->tx_dropped;
6899 stats->rx_errors = vsi_stats->rx_errors;
6900 stats->rx_dropped = vsi_stats->rx_dropped;
6901 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6902 stats->rx_length_errors = vsi_stats->rx_length_errors;
6903 }
6904
6905 /**
6906 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6907 * @vsi: VSI having NAPI disabled
6908 */
6909 static void ice_napi_disable_all(struct ice_vsi *vsi)
6910 {
6911 int q_idx;
6912
6913 if (!vsi->netdev)
6914 return;
6915
6916 ice_for_each_q_vector(vsi, q_idx) {
6917 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6918
6919 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6920 napi_disable(&q_vector->napi);
6921
6922 cancel_work_sync(&q_vector->tx.dim.work);
6923 cancel_work_sync(&q_vector->rx.dim.work);
6924 }
6925 }
6926
6927 /**
6928 * ice_down - Shutdown the connection
6929 * @vsi: The VSI being stopped
6930 *
6931 * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
6932 */
6933 int ice_down(struct ice_vsi *vsi)
6934 {
6935 int i, tx_err, rx_err, vlan_err = 0;
6936
6937 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6938
6939 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6940 vlan_err = ice_vsi_del_vlan_zero(vsi);
6941 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6942 netif_carrier_off(vsi->netdev);
6943 netif_tx_disable(vsi->netdev);
6944 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6945 ice_eswitch_stop_all_tx_queues(vsi->back);
6946 }
6947
6948 ice_vsi_dis_irq(vsi);
6949
6950 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6951 if (tx_err)
6952 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6953 vsi->vsi_num, tx_err);
6954 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6955 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6956 if (tx_err)
6957 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6958 vsi->vsi_num, tx_err);
6959 }
6960
6961 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6962 if (rx_err)
6963 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6964 vsi->vsi_num, rx_err);
6965
6966 ice_napi_disable_all(vsi);
6967
6968 ice_for_each_txq(vsi, i)
6969 ice_clean_tx_ring(vsi->tx_rings[i]);
6970
6971 if (ice_is_xdp_ena_vsi(vsi))
6972 ice_for_each_xdp_txq(vsi, i)
6973 ice_clean_tx_ring(vsi->xdp_rings[i]);
6974
6975 ice_for_each_rxq(vsi, i)
6976 ice_clean_rx_ring(vsi->rx_rings[i]);
6977
6978 if (tx_err || rx_err || vlan_err) {
6979 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6980 vsi->vsi_num, vsi->vsw->sw_id);
6981 return -EIO;
6982 }
6983
6984 return 0;
6985 }
6986
6987 /**
6988 * ice_down_up - shutdown the VSI connection and bring it up
6989 * @vsi: the VSI to be reconnected
6990 */
6991 int ice_down_up(struct ice_vsi *vsi)
6992 {
6993 int ret;
6994
6995 /* if DOWN already set, nothing to do */
6996 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6997 return 0;
6998
6999 ret = ice_down(vsi);
7000 if (ret)
7001 return ret;
7002
7003 ret = ice_up(vsi);
7004 if (ret) {
7005 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7006 return ret;
7007 }
7008
7009 return 0;
7010 }
7011
7012 /**
7013 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7014 * @vsi: VSI having resources allocated
7015 *
7016 * Return 0 on success, negative on failure
7017 */
7018 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7019 {
7020 int i, err = 0;
7021
7022 if (!vsi->num_txq) {
7023 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7024 vsi->vsi_num);
7025 return -EINVAL;
7026 }
7027
7028 ice_for_each_txq(vsi, i) {
7029 struct ice_tx_ring *ring = vsi->tx_rings[i];
7030
7031 if (!ring)
7032 return -EINVAL;
7033
7034 if (vsi->netdev)
7035 ring->netdev = vsi->netdev;
7036 err = ice_setup_tx_ring(ring);
7037 if (err)
7038 break;
7039 }
7040
7041 return err;
7042 }
7043
7044 /**
7045 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7046 * @vsi: VSI having resources allocated
7047 *
7048 * Return 0 on success, negative on failure
7049 */
7050 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7051 {
7052 int i, err = 0;
7053
7054 if (!vsi->num_rxq) {
7055 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7056 vsi->vsi_num);
7057 return -EINVAL;
7058 }
7059
7060 ice_for_each_rxq(vsi, i) {
7061 struct ice_rx_ring *ring = vsi->rx_rings[i];
7062
7063 if (!ring)
7064 return -EINVAL;
7065
7066 if (vsi->netdev)
7067 ring->netdev = vsi->netdev;
7068 err = ice_setup_rx_ring(ring);
7069 if (err)
7070 break;
7071 }
7072
7073 return err;
7074 }
7075
7076 /**
7077 * ice_vsi_open_ctrl - open control VSI for use
7078 * @vsi: the VSI to open
7079 *
7080 * Initialization of the Control VSI
7081 *
7082 * Returns 0 on success, negative value on error
7083 */
7084 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7085 {
7086 char int_name[ICE_INT_NAME_STR_LEN];
7087 struct ice_pf *pf = vsi->back;
7088 struct device *dev;
7089 int err;
7090
7091 dev = ice_pf_to_dev(pf);
7092 /* allocate descriptors */
7093 err = ice_vsi_setup_tx_rings(vsi);
7094 if (err)
7095 goto err_setup_tx;
7096
7097 err = ice_vsi_setup_rx_rings(vsi);
7098 if (err)
7099 goto err_setup_rx;
7100
7101 err = ice_vsi_cfg_lan(vsi);
7102 if (err)
7103 goto err_setup_rx;
7104
7105 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7106 dev_driver_string(dev), dev_name(dev));
7107 err = ice_vsi_req_irq_msix(vsi, int_name);
7108 if (err)
7109 goto err_setup_rx;
7110
7111 ice_vsi_cfg_msix(vsi);
7112
7113 err = ice_vsi_start_all_rx_rings(vsi);
7114 if (err)
7115 goto err_up_complete;
7116
7117 clear_bit(ICE_VSI_DOWN, vsi->state);
7118 ice_vsi_ena_irq(vsi);
7119
7120 return 0;
7121
7122 err_up_complete:
7123 ice_down(vsi);
7124 err_setup_rx:
7125 ice_vsi_free_rx_rings(vsi);
7126 err_setup_tx:
7127 ice_vsi_free_tx_rings(vsi);
7128
7129 return err;
7130 }
7131
7132 /**
7133 * ice_vsi_open - Called when a network interface is made active
7134 * @vsi: the VSI to open
7135 *
7136 * Initialization of the VSI
7137 *
7138 * Returns 0 on success, negative value on error
7139 */
7140 int ice_vsi_open(struct ice_vsi *vsi)
7141 {
7142 char int_name[ICE_INT_NAME_STR_LEN];
7143 struct ice_pf *pf = vsi->back;
7144 int err;
7145
7146 /* allocate descriptors */
7147 err = ice_vsi_setup_tx_rings(vsi);
7148 if (err)
7149 goto err_setup_tx;
7150
7151 err = ice_vsi_setup_rx_rings(vsi);
7152 if (err)
7153 goto err_setup_rx;
7154
7155 err = ice_vsi_cfg_lan(vsi);
7156 if (err)
7157 goto err_setup_rx;
7158
7159 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7160 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7161 err = ice_vsi_req_irq_msix(vsi, int_name);
7162 if (err)
7163 goto err_setup_rx;
7164
7165 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7166
7167 if (vsi->type == ICE_VSI_PF) {
7168 /* Notify the stack of the actual queue counts. */
7169 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7170 if (err)
7171 goto err_set_qs;
7172
7173 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7174 if (err)
7175 goto err_set_qs;
7176 }
7177
7178 err = ice_up_complete(vsi);
7179 if (err)
7180 goto err_up_complete;
7181
7182 return 0;
7183
7184 err_up_complete:
7185 ice_down(vsi);
7186 err_set_qs:
7187 ice_vsi_free_irq(vsi);
7188 err_setup_rx:
7189 ice_vsi_free_rx_rings(vsi);
7190 err_setup_tx:
7191 ice_vsi_free_tx_rings(vsi);
7192
7193 return err;
7194 }
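/* Illustrative only: ice_vsi_open() carries the full contract of an
 * ndo_open-style path, so a hypothetical caller (not the driver's actual
 * ndo_open) reduces to a single call with its error code propagated:
 *
 *	static int example_open(struct net_device *netdev)
 *	{
 *		struct ice_netdev_priv *np = netdev_priv(netdev);
 *
 *		return ice_vsi_open(np->vsi);
 *	}
 */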
7195
7196 /**
7197 * ice_vsi_release_all - Delete all VSIs
7198 * @pf: PF from which all VSIs are being removed
7199 */
7200 static void ice_vsi_release_all(struct ice_pf *pf)
7201 {
7202 int err, i;
7203
7204 if (!pf->vsi)
7205 return;
7206
7207 ice_for_each_vsi(pf, i) {
7208 if (!pf->vsi[i])
7209 continue;
7210
7211 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7212 continue;
7213
7214 err = ice_vsi_release(pf->vsi[i]);
7215 if (err)
7216 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7217 i, err, pf->vsi[i]->vsi_num);
7218 }
7219 }
7220
7221 /**
7222 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7223 * @pf: pointer to the PF instance
7224 * @type: VSI type to rebuild
7225 *
7226 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7227 */
7228 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7229 {
7230 struct device *dev = ice_pf_to_dev(pf);
7231 int i, err;
7232
7233 ice_for_each_vsi(pf, i) {
7234 struct ice_vsi *vsi = pf->vsi[i];
7235
7236 if (!vsi || vsi->type != type)
7237 continue;
7238
7239 /* rebuild the VSI */
7240 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7241 if (err) {
7242 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7243 err, vsi->idx, ice_vsi_type_str(type));
7244 return err;
7245 }
7246
7247 /* replay filters for the VSI */
7248 err = ice_replay_vsi(&pf->hw, vsi->idx);
7249 if (err) {
7250 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7251 err, vsi->idx, ice_vsi_type_str(type));
7252 return err;
7253 }
7254
7255 /* Re-map HW VSI number, using VSI handle that has been
7256 * previously validated in ice_replay_vsi() call above
7257 */
7258 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7259
7260 /* enable the VSI */
7261 err = ice_ena_vsi(vsi, false);
7262 if (err) {
7263 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7264 err, vsi->idx, ice_vsi_type_str(type));
7265 return err;
7266 }
7267
7268 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7269 ice_vsi_type_str(type));
7270 }
7271
7272 return 0;
7273 }
7274
7275 /**
7276 * ice_update_pf_netdev_link - Update PF netdev link status
7277 * @pf: pointer to the PF instance
7278 */
7279 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7280 {
7281 bool link_up;
7282 int i;
7283
7284 ice_for_each_vsi(pf, i) {
7285 struct ice_vsi *vsi = pf->vsi[i];
7286
7287 if (!vsi || vsi->type != ICE_VSI_PF)
7288 return;
7289
7290 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7291 if (link_up) {
7292 netif_carrier_on(pf->vsi[i]->netdev);
7293 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7294 } else {
7295 netif_carrier_off(pf->vsi[i]->netdev);
7296 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7297 }
7298 }
7299 }
7300
7301 /**
7302 * ice_rebuild - rebuild after reset
7303 * @pf: PF to rebuild
7304 * @reset_type: type of reset
7305 *
7306 * Do not rebuild VF VSI in this flow because that is already handled via
7307 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
7308 * PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we don't want
7309 * to reset/rebuild all the VF VSIs twice.
7310 */
7311 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7312 {
7313 struct ice_vsi *vsi = ice_get_main_vsi(pf);
7314 struct device *dev = ice_pf_to_dev(pf);
7315 struct ice_hw *hw = &pf->hw;
7316 bool dvm;
7317 int err;
7318
7319 if (test_bit(ICE_DOWN, pf->state))
7320 goto clear_recovery;
7321
7322 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7323
7324 #define ICE_EMP_RESET_SLEEP_MS 5000
7325 if (reset_type == ICE_RESET_EMPR) {
7326 /* If an EMP reset has occurred, any previously pending flash
7327 * update will have completed. We no longer know whether or
7328 * not the NVM update EMP reset is restricted.
7329 */
7330 pf->fw_emp_reset_disabled = false;
7331
7332 msleep(ICE_EMP_RESET_SLEEP_MS);
7333 }
7334
7335 err = ice_init_all_ctrlq(hw);
7336 if (err) {
7337 dev_err(dev, "control queues init failed %d\n", err);
7338 goto err_init_ctrlq;
7339 }
7340
7341 /* if DDP was previously loaded successfully */
7342 if (!ice_is_safe_mode(pf)) {
7343 /* reload the SW DB of filter tables */
7344 if (reset_type == ICE_RESET_PFR)
7345 ice_fill_blk_tbls(hw);
7346 else
7347 /* Reload DDP Package after CORER/GLOBR reset */
7348 ice_load_pkg(NULL, pf);
7349 }
7350
7351 err = ice_clear_pf_cfg(hw);
7352 if (err) {
7353 dev_err(dev, "clear PF configuration failed %d\n", err);
7354 goto err_init_ctrlq;
7355 }
7356
7357 ice_clear_pxe_mode(hw);
7358
7359 err = ice_init_nvm(hw);
7360 if (err) {
7361 dev_err(dev, "ice_init_nvm failed %d\n", err);
7362 goto err_init_ctrlq;
7363 }
7364
7365 err = ice_get_caps(hw);
7366 if (err) {
7367 dev_err(dev, "ice_get_caps failed %d\n", err);
7368 goto err_init_ctrlq;
7369 }
7370
7371 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7372 if (err) {
7373 dev_err(dev, "set_mac_cfg failed %d\n", err);
7374 goto err_init_ctrlq;
7375 }
7376
7377 dvm = ice_is_dvm_ena(hw);
7378
7379 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7380 if (err)
7381 goto err_init_ctrlq;
7382
7383 err = ice_sched_init_port(hw->port_info);
7384 if (err)
7385 goto err_sched_init_port;
7386
7387 /* start misc vector */
7388 err = ice_req_irq_msix_misc(pf);
7389 if (err) {
7390 dev_err(dev, "misc vector setup failed: %d\n", err);
7391 goto err_sched_init_port;
7392 }
7393
7394 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7395 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7396 if (!rd32(hw, PFQF_FD_SIZE)) {
7397 u16 unused, guar, b_effort;
7398
7399 guar = hw->func_caps.fd_fltr_guar;
7400 b_effort = hw->func_caps.fd_fltr_best_effort;
7401
7402 /* force guaranteed filter pool for PF */
7403 ice_alloc_fd_guar_item(hw, &unused, guar);
7404 /* force shared filter pool for PF */
7405 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7406 }
7407 }
7408
7409 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7410 ice_dcb_rebuild(pf);
7411
7412 /* If the PF previously had PTP enabled, PTP init needs to happen before
7413 * the VSI rebuild; otherwise the PTP link status events will
7414 * fail.
7415 */
7416 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7417 ice_ptp_reset(pf);
7418
7419 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7420 ice_gnss_init(pf);
7421
7422 /* rebuild PF VSI */
7423 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7424 if (err) {
7425 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7426 goto err_vsi_rebuild;
7427 }
7428
7429 /* configure PTP timestamping after VSI rebuild */
7430 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7431 ice_ptp_cfg_timestamp(pf, false);
7432
7433 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7434 if (err) {
7435 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7436 goto err_vsi_rebuild;
7437 }
7438
7439 if (reset_type == ICE_RESET_PFR) {
7440 err = ice_rebuild_channels(pf);
7441 if (err) {
7442 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7443 err);
7444 goto err_vsi_rebuild;
7445 }
7446 }
7447
7448 /* If Flow Director is active */
7449 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7450 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7451 if (err) {
7452 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7453 goto err_vsi_rebuild;
7454 }
7455
7456 /* replay HW Flow Director recipes */
7457 if (hw->fdir_prof)
7458 ice_fdir_replay_flows(hw);
7459
7460 /* replay Flow Director filters */
7461 ice_fdir_replay_fltrs(pf);
7462
7463 ice_rebuild_arfs(pf);
7464 }
7465
7466 if (vsi && vsi->netdev)
7467 netif_device_attach(vsi->netdev);
7468
7469 ice_update_pf_netdev_link(pf);
7470
7471 /* tell the firmware we are up */
7472 err = ice_send_version(pf);
7473 if (err) {
7474 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7475 err);
7476 goto err_vsi_rebuild;
7477 }
7478
7479 ice_replay_post(hw);
7480
7481 /* if we get here, reset flow is successful */
7482 clear_bit(ICE_RESET_FAILED, pf->state);
7483
7484 ice_plug_aux_dev(pf);
7485 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7486 ice_lag_rebuild(pf);
7487 return;
7488
7489 err_vsi_rebuild:
7490 err_sched_init_port:
7491 ice_sched_cleanup_all(hw);
7492 err_init_ctrlq:
7493 ice_shutdown_all_ctrlq(hw);
7494 set_bit(ICE_RESET_FAILED, pf->state);
7495 clear_recovery:
7496 /* set this bit in PF state to control service task scheduling */
7497 set_bit(ICE_NEEDS_RESTART, pf->state);
7498 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7499 }
7500
7501 /**
7502 * ice_change_mtu - NDO callback to change the MTU
7503 * @netdev: network interface device structure
7504 * @new_mtu: new value for maximum frame size
7505 *
7506 * Returns 0 on success, negative on failure
7507 */
7508 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7509 {
7510 struct ice_netdev_priv *np = netdev_priv(netdev);
7511 struct ice_vsi *vsi = np->vsi;
7512 struct ice_pf *pf = vsi->back;
7513 struct bpf_prog *prog;
7514 u8 count = 0;
7515 int err = 0;
7516
7517 if (new_mtu == (int)netdev->mtu) {
7518 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7519 return 0;
7520 }
7521
7522 prog = vsi->xdp_prog;
7523 if (prog && !prog->aux->xdp_has_frags) {
7524 int frame_size = ice_max_xdp_frame_size(vsi);
7525
7526 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7527 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7528 frame_size - ICE_ETH_PKT_HDR_PAD);
7529 return -EINVAL;
7530 }
7531 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7532 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7533 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7534 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7535 return -EINVAL;
7536 }
7537 }
7538
7539 /* if a reset is in progress, wait for some time for it to complete */
7540 do {
7541 if (ice_is_reset_in_progress(pf->state)) {
7542 count++;
7543 usleep_range(1000, 2000);
7544 } else {
7545 break;
7546 }
7547
7548 } while (count < 100);
7549
7550 if (count == 100) {
7551 netdev_err(netdev, "can't change MTU. Device is busy\n");
7552 return -EBUSY;
7553 }
7554
7555 netdev->mtu = (unsigned int)new_mtu;
7556 err = ice_down_up(vsi);
7557 if (err)
7558 return err;
7559
7560 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7561 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7562
7563 return err;
7564 }
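/* Illustrative only ("eth0" is a placeholder interface name): with a
 * non-multi-buffer XDP program attached, the frame-size check above
 * rejects oversized MTUs before the VSI is ever bounced, e.g.:
 *
 *	ip link set dev eth0 mtu 9000	<- rejected with -EINVAL
 *	ip link set dev eth0 mtu 1500	<- accepted, VSI goes down/up
 */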
7565
7566 /**
7567 * ice_eth_ioctl - Access the hwtstamp interface
7568 * @netdev: network interface device structure
7569 * @ifr: interface request data
7570 * @cmd: ioctl command
7571 */
7572 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7573 {
7574 struct ice_netdev_priv *np = netdev_priv(netdev);
7575 struct ice_pf *pf = np->vsi->back;
7576
7577 switch (cmd) {
7578 case SIOCGHWTSTAMP:
7579 return ice_ptp_get_ts_config(pf, ifr);
7580 case SIOCSHWTSTAMP:
7581 return ice_ptp_set_ts_config(pf, ifr);
7582 default:
7583 return -EOPNOTSUPP;
7584 }
7585 }
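/* Illustrative userspace counterpart (not driver code; "eth0" and "fd"
 * are placeholders, fd being an open datagram socket): enabling Rx
 * timestamping via the SIOCSHWTSTAMP path handled above:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_OFF,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */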
7586
7587 /**
7588 * ice_aq_str - convert AQ err code to a string
7589 * @aq_err: the AQ error code to convert
7590 */
7591 const char *ice_aq_str(enum ice_aq_err aq_err)
7592 {
7593 switch (aq_err) {
7594 case ICE_AQ_RC_OK:
7595 return "OK";
7596 case ICE_AQ_RC_EPERM:
7597 return "ICE_AQ_RC_EPERM";
7598 case ICE_AQ_RC_ENOENT:
7599 return "ICE_AQ_RC_ENOENT";
7600 case ICE_AQ_RC_ENOMEM:
7601 return "ICE_AQ_RC_ENOMEM";
7602 case ICE_AQ_RC_EBUSY:
7603 return "ICE_AQ_RC_EBUSY";
7604 case ICE_AQ_RC_EEXIST:
7605 return "ICE_AQ_RC_EEXIST";
7606 case ICE_AQ_RC_EINVAL:
7607 return "ICE_AQ_RC_EINVAL";
7608 case ICE_AQ_RC_ENOSPC:
7609 return "ICE_AQ_RC_ENOSPC";
7610 case ICE_AQ_RC_ENOSYS:
7611 return "ICE_AQ_RC_ENOSYS";
7612 case ICE_AQ_RC_EMODE:
7613 return "ICE_AQ_RC_EMODE";
7614 case ICE_AQ_RC_ENOSEC:
7615 return "ICE_AQ_RC_ENOSEC";
7616 case ICE_AQ_RC_EBADSIG:
7617 return "ICE_AQ_RC_EBADSIG";
7618 case ICE_AQ_RC_ESVN:
7619 return "ICE_AQ_RC_ESVN";
7620 case ICE_AQ_RC_EBADMAN:
7621 return "ICE_AQ_RC_EBADMAN";
7622 case ICE_AQ_RC_EBADBUF:
7623 return "ICE_AQ_RC_EBADBUF";
7624 }
7625
7626 return "ICE_AQ_RC_UNKNOWN";
7627 }
7628
7629 /**
7630 * ice_set_rss_lut - Set RSS LUT
7631 * @vsi: Pointer to VSI structure
7632 * @lut: Lookup table
7633 * @lut_size: Lookup table size
7634 *
7635 * Returns 0 on success, negative on failure
7636 */
7637 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7638 {
7639 struct ice_aq_get_set_rss_lut_params params = {};
7640 struct ice_hw *hw = &vsi->back->hw;
7641 int status;
7642
7643 if (!lut)
7644 return -EINVAL;
7645
7646 params.vsi_handle = vsi->idx;
7647 params.lut_size = lut_size;
7648 params.lut_type = vsi->rss_lut_type;
7649 params.lut = lut;
7650
7651 status = ice_aq_set_rss_lut(hw, &params);
7652 if (status)
7653 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7654 status, ice_aq_str(hw->adminq.sq_last_status));
7655
7656 return status;
7657 }
7658
7659 /**
7660 * ice_set_rss_key - Set RSS key
7661 * @vsi: Pointer to the VSI structure
7662 * @seed: RSS hash seed
7663 *
7664 * Returns 0 on success, negative on failure
7665 */
7666 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7667 {
7668 struct ice_hw *hw = &vsi->back->hw;
7669 int status;
7670
7671 if (!seed)
7672 return -EINVAL;
7673
7674 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7675 if (status)
7676 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7677 status, ice_aq_str(hw->adminq.sq_last_status));
7678
7679 return status;
7680 }
7681
7682 /**
7683 * ice_get_rss_lut - Get RSS LUT
7684 * @vsi: Pointer to VSI structure
7685 * @lut: Buffer to store the lookup table entries
7686 * @lut_size: Size of buffer to store the lookup table entries
7687 *
7688 * Returns 0 on success, negative on failure
7689 */
7690 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7691 {
7692 struct ice_aq_get_set_rss_lut_params params = {};
7693 struct ice_hw *hw = &vsi->back->hw;
7694 int status;
7695
7696 if (!lut)
7697 return -EINVAL;
7698
7699 params.vsi_handle = vsi->idx;
7700 params.lut_size = lut_size;
7701 params.lut_type = vsi->rss_lut_type;
7702 params.lut = lut;
7703
7704 status = ice_aq_get_rss_lut(hw, &params);
7705 if (status)
7706 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7707 status, ice_aq_str(hw->adminq.sq_last_status));
7708
7709 return status;
7710 }
7711
7712 /**
7713 * ice_get_rss_key - Get RSS key
7714 * @vsi: Pointer to VSI structure
7715 * @seed: Buffer to store the key in
7716 *
7717 * Returns 0 on success, negative on failure
7718 */
7719 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7720 {
7721 struct ice_hw *hw = &vsi->back->hw;
7722 int status;
7723
7724 if (!seed)
7725 return -EINVAL;
7726
7727 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7728 if (status)
7729 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7730 status, ice_aq_str(hw->adminq.sq_last_status));
7731
7732 return status;
7733 }
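/* A usage sketch (illustrative, not part of the driver): reading back the
 * RSS LUT and key together, assuming "vsi" is valid and using the driver's
 * own ICE_VSIQF_HKEY_ARRAY_SIZE and vsi->rss_table_size for buffer sizing:
 *
 *	u8 seed[ICE_VSIQF_HKEY_ARRAY_SIZE];
 *	u8 *lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 *
 *	if (lut) {
 *		if (!ice_get_rss_lut(vsi, lut, vsi->rss_table_size))
 *			ice_get_rss_key(vsi, seed);
 *		kfree(lut);
 *	}
 */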
7734
7735 /**
7736 * ice_bridge_getlink - Get the hardware bridge mode
7737 * @skb: skb buff
7738 * @pid: process ID
7739 * @seq: RTNL message seq
7740 * @dev: the netdev being configured
7741 * @filter_mask: filter mask passed in
7742 * @nlflags: netlink flags passed in
7743 *
7744 * Return the bridge mode (VEB/VEPA)
7745 */
7746 static int
7747 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7748 struct net_device *dev, u32 filter_mask, int nlflags)
7749 {
7750 struct ice_netdev_priv *np = netdev_priv(dev);
7751 struct ice_vsi *vsi = np->vsi;
7752 struct ice_pf *pf = vsi->back;
7753 u16 bmode;
7754
7755 bmode = pf->first_sw->bridge_mode;
7756
7757 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7758 filter_mask, NULL);
7759 }
7760
7761 /**
7762 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7763 * @vsi: Pointer to VSI structure
7764 * @bmode: Hardware bridge mode (VEB/VEPA)
7765 *
7766 * Returns 0 on success, negative on failure
7767 */
7768 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7769 {
7770 struct ice_aqc_vsi_props *vsi_props;
7771 struct ice_hw *hw = &vsi->back->hw;
7772 struct ice_vsi_ctx *ctxt;
7773 int ret;
7774
7775 vsi_props = &vsi->info;
7776
7777 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7778 if (!ctxt)
7779 return -ENOMEM;
7780
7781 ctxt->info = vsi->info;
7782
7783 if (bmode == BRIDGE_MODE_VEB)
7784 /* change from VEPA to VEB mode */
7785 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7786 else
7787 /* change from VEB to VEPA mode */
7788 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7789 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7790
7791 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7792 if (ret) {
7793 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7794 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7795 goto out;
7796 }
7797 /* Update sw flags for bookkeeping */
7798 vsi_props->sw_flags = ctxt->info.sw_flags;
7799
7800 out:
7801 kfree(ctxt);
7802 return ret;
7803 }
7804
7805 /**
7806 * ice_bridge_setlink - Set the hardware bridge mode
7807 * @dev: the netdev being configured
7808 * @nlh: RTNL message
7809 * @flags: bridge setlink flags
7810 * @extack: netlink extended ack
7811 *
7812 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7813 * hooked up to. Iterates through the PF VSI list and sets the loopback mode
7814 * (if not already set) for all VSIs connected to this switch, and also updates
7815 * the unicast switch filter rules for the corresponding switch of the netdev.
7816 */
7817 static int
7818 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7819 u16 __always_unused flags,
7820 struct netlink_ext_ack __always_unused *extack)
7821 {
7822 struct ice_netdev_priv *np = netdev_priv(dev);
7823 struct ice_pf *pf = np->vsi->back;
7824 struct nlattr *attr, *br_spec;
7825 struct ice_hw *hw = &pf->hw;
7826 struct ice_sw *pf_sw;
7827 int rem, v, err = 0;
7828
7829 pf_sw = pf->first_sw;
7830 /* find the attribute in the netlink message */
7831 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7832 if (!br_spec)
7833 return -EINVAL;
7834
7835 nla_for_each_nested(attr, br_spec, rem) {
7836 __u16 mode;
7837
7838 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7839 continue;
7840 mode = nla_get_u16(attr);
7841 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7842 return -EINVAL;
7843 /* Continue if bridge mode is not being flipped */
7844 if (mode == pf_sw->bridge_mode)
7845 continue;
7846 /* Iterates through the PF VSI list and update the loopback
7847 * mode of the VSI
7848 */
7849 ice_for_each_vsi(pf, v) {
7850 if (!pf->vsi[v])
7851 continue;
7852 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7853 if (err)
7854 return err;
7855 }
7856
7857 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7858 /* Update the unicast switch filter rules for the corresponding
7859 * switch of the netdev
7860 */
7861 err = ice_update_sw_rule_bridge_mode(hw);
7862 if (err) {
7863 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7864 mode, err,
7865 ice_aq_str(hw->adminq.sq_last_status));
7866 /* revert hw->evb_veb */
7867 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7868 return err;
7869 }
7870
7871 pf_sw->bridge_mode = mode;
7872 }
7873
7874 return 0;
7875 }
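/* Illustrative only ("eth0" is a placeholder interface name): this
 * ndo_bridge_setlink handler is typically reached from iproute2, e.g.:
 *
 *	bridge link set dev eth0 hwmode veb
 *
 * which flips every PF VSI to loopback-allowed and updates the unicast
 * switch rules as above.
 */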
7876
7877 /**
7878 * ice_tx_timeout - Respond to a Tx Hang
7879 * @netdev: network interface device structure
7880 * @txqueue: Tx queue
7881 */
7882 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7883 {
7884 struct ice_netdev_priv *np = netdev_priv(netdev);
7885 struct ice_tx_ring *tx_ring = NULL;
7886 struct ice_vsi *vsi = np->vsi;
7887 struct ice_pf *pf = vsi->back;
7888 u32 i;
7889
7890 pf->tx_timeout_count++;
7891
7892 /* Check if PFC is enabled for the TC to which the queue belongs.
7893 * If so, the Tx timeout is not caused by a hung queue and there is
7894 * no need to reset and rebuild
7895 */
7896 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7897 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7898 txqueue);
7899 return;
7900 }
7901
7902 /* now that we have an index, find the tx_ring struct */
7903 ice_for_each_txq(vsi, i)
7904 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7905 if (txqueue == vsi->tx_rings[i]->q_index) {
7906 tx_ring = vsi->tx_rings[i];
7907 break;
7908 }
7909
7910 /* Reset recovery level if enough time has elapsed after last timeout.
7911 * Also ensure no new reset action happens before next timeout period.
7912 */
7913 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7914 pf->tx_timeout_recovery_level = 1;
7915 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7916 netdev->watchdog_timeo)))
7917 return;
7918
7919 if (tx_ring) {
7920 struct ice_hw *hw = &pf->hw;
7921 u32 head, val = 0;
7922
7923 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7924 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7925 /* Read interrupt register */
7926 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7927
7928 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7929 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7930 head, tx_ring->next_to_use, val);
7931 }
7932
7933 pf->tx_timeout_last_recovery = jiffies;
7934 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7935 pf->tx_timeout_recovery_level, txqueue);
7936
7937 switch (pf->tx_timeout_recovery_level) {
7938 case 1:
7939 set_bit(ICE_PFR_REQ, pf->state);
7940 break;
7941 case 2:
7942 set_bit(ICE_CORER_REQ, pf->state);
7943 break;
7944 case 3:
7945 set_bit(ICE_GLOBR_REQ, pf->state);
7946 break;
7947 default:
7948 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7949 set_bit(ICE_DOWN, pf->state);
7950 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7951 set_bit(ICE_SERVICE_DIS, pf->state);
7952 break;
7953 }
7954
7955 ice_service_task_schedule(pf);
7956 pf->tx_timeout_recovery_level++;
7957 }
7958
7959 /**
7960 * ice_setup_tc_cls_flower - flower classifier offloads
7961 * @np: net device to configure
7962 * @filter_dev: device on which filter is added
7963 * @cls_flower: offload data
7964 */
7965 static int
7966 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7967 struct net_device *filter_dev,
7968 struct flow_cls_offload *cls_flower)
7969 {
7970 struct ice_vsi *vsi = np->vsi;
7971
7972 if (cls_flower->common.chain_index)
7973 return -EOPNOTSUPP;
7974
7975 switch (cls_flower->command) {
7976 case FLOW_CLS_REPLACE:
7977 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7978 case FLOW_CLS_DESTROY:
7979 return ice_del_cls_flower(vsi, cls_flower);
7980 default:
7981 return -EINVAL;
7982 }
7983 }
7984
7985 /**
7986 * ice_setup_tc_block_cb - callback handler registered for TC block
7987 * @type: TC SETUP type
7988 * @type_data: TC flower offload data that contains user input
7989 * @cb_priv: netdev private data
7990 */
7991 static int
7992 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7993 {
7994 struct ice_netdev_priv *np = cb_priv;
7995
7996 switch (type) {
7997 case TC_SETUP_CLSFLOWER:
7998 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7999 type_data);
8000 default:
8001 return -EOPNOTSUPP;
8002 }
8003 }
8004
8005 /**
8006 * ice_validate_mqprio_qopt - Validate TCF input parameters
8007 * @vsi: Pointer to VSI
8008 * @mqprio_qopt: input parameters for mqprio queue configuration
8009 *
8010 * This function validates MQPRIO params, such as qcount (power of 2 wherever
8011 * needed), and makes sure the user doesn't specify a qcount or BW rate
8012 * limit for more TCs than "num_tc"
8013 */
8014 static int
8015 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8016 struct tc_mqprio_qopt_offload *mqprio_qopt)
8017 {
8018 int non_power_of_2_qcount = 0;
8019 struct ice_pf *pf = vsi->back;
8020 int max_rss_q_cnt = 0;
8021 u64 sum_min_rate = 0;
8022 struct device *dev;
8023 int i, speed;
8024 u8 num_tc;
8025
8026 if (vsi->type != ICE_VSI_PF)
8027 return -EINVAL;
8028
8029 if (mqprio_qopt->qopt.offset[0] != 0 ||
8030 mqprio_qopt->qopt.num_tc < 1 ||
8031 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8032 return -EINVAL;
8033
8034 dev = ice_pf_to_dev(pf);
8035 vsi->ch_rss_size = 0;
8036 num_tc = mqprio_qopt->qopt.num_tc;
8037 speed = ice_get_link_speed_kbps(vsi);
8038
8039 for (i = 0; num_tc; i++) {
8040 int qcount = mqprio_qopt->qopt.count[i];
8041 u64 max_rate, min_rate, rem;
8042
8043 if (!qcount)
8044 return -EINVAL;
8045
8046 if (is_power_of_2(qcount)) {
8047 if (non_power_of_2_qcount &&
8048 qcount > non_power_of_2_qcount) {
8049 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8050 qcount, non_power_of_2_qcount);
8051 return -EINVAL;
8052 }
8053 if (qcount > max_rss_q_cnt)
8054 max_rss_q_cnt = qcount;
8055 } else {
8056 if (non_power_of_2_qcount &&
8057 qcount != non_power_of_2_qcount) {
8058 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8059 qcount, non_power_of_2_qcount);
8060 return -EINVAL;
8061 }
8062 if (qcount < max_rss_q_cnt) {
8063 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8064 qcount, max_rss_q_cnt);
8065 return -EINVAL;
8066 }
8067 max_rss_q_cnt = qcount;
8068 non_power_of_2_qcount = qcount;
8069 }
8070
8071 /* TC command takes input in K/M/Gbps or K/M/Gbit etc but
8072 * converts the bandwidth rate limit into Bytes/s when
8073 * passing it down to the driver. So convert input bandwidth
8074 * from Bytes/s to Kbps
8075 */
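/* For instance (illustrative arithmetic, assuming ICE_BW_KBPS_DIVISOR
 * is 125 Bytes/s per Kbps): a "max_rate 10Gbit" request arrives here
 * as 1250000000 Bytes/s; dividing by 125 yields 10000000 Kbps, the
 * unit used for all checks below.
 */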
8076 max_rate = mqprio_qopt->max_rate[i];
8077 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8078
8079 /* min_rate is minimum guaranteed rate and it can't be zero */
8080 min_rate = mqprio_qopt->min_rate[i];
8081 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8082 sum_min_rate += min_rate;
8083
8084 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8085 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8086 min_rate, ICE_MIN_BW_LIMIT);
8087 return -EINVAL;
8088 }
8089
8090 if (max_rate && max_rate > speed) {
8091 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8092 i, max_rate, speed);
8093 return -EINVAL;
8094 }
8095
8096 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8097 if (rem) {
8098 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8099 i, ICE_MIN_BW_LIMIT);
8100 return -EINVAL;
8101 }
8102
8103 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8104 if (rem) {
8105 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8106 i, ICE_MIN_BW_LIMIT);
8107 return -EINVAL;
8108 }
8109
8110 /* min_rate can't be more than max_rate, except when max_rate
8111 * is zero (implies max_rate sought is max line rate). In such
8112 * a case min_rate can be more than max.
8113 */
8114 if (max_rate && min_rate > max_rate) {
8115 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8116 min_rate, max_rate);
8117 return -EINVAL;
8118 }
8119
8120 if (i >= mqprio_qopt->qopt.num_tc - 1)
8121 break;
8122 if (mqprio_qopt->qopt.offset[i + 1] !=
8123 (mqprio_qopt->qopt.offset[i] + qcount))
8124 return -EINVAL;
8125 }
8126 if (vsi->num_rxq <
8127 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8128 return -EINVAL;
8129 if (vsi->num_txq <
8130 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8131 return -EINVAL;
8132
8133 if (sum_min_rate && sum_min_rate > (u64)speed) {
8134 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8135 sum_min_rate, speed);
8136 return -EINVAL;
8137 }
8138
8139 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8140 vsi->ch_rss_size = max_rss_q_cnt;
8141
8142 return 0;
8143 }
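/* An illustrative tc invocation (placeholder interface name) that passes
 * the validation above: two TCs, power-of-2 qcounts with contiguous
 * offsets, and per-TC rates in multiples of ICE_MIN_BW_LIMIT:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *		hw 1 mode channel shaper bw_rlimit \
 *		min_rate 1Gbit 2Gbit max_rate 5Gbit 8Gbit
 */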
8144
8145 /**
8146 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8147 * @pf: ptr to PF device
8148 * @vsi: ptr to VSI
8149 */
8150 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8151 {
8152 struct device *dev = ice_pf_to_dev(pf);
8153 bool added = false;
8154 struct ice_hw *hw;
8155 int flow;
8156
8157 if (!(vsi->num_gfltr || vsi->num_bfltr))
8158 return -EINVAL;
8159
8160 hw = &pf->hw;
8161 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8162 struct ice_fd_hw_prof *prof;
8163 int tun, status;
8164 u64 entry_h;
8165
8166 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8167 hw->fdir_prof[flow]->cnt))
8168 continue;
8169
8170 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8171 enum ice_flow_priority prio;
8172 u64 prof_id;
8173
8174 /* add this VSI to FDir profile for this flow */
8175 prio = ICE_FLOW_PRIO_NORMAL;
8176 prof = hw->fdir_prof[flow];
8177 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8178 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8179 prof->vsi_h[0], vsi->idx,
8180 prio, prof->fdir_seg[tun],
8181 &entry_h);
8182 if (status) {
8183 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8184 vsi->idx, flow);
8185 continue;
8186 }
8187
8188 prof->entry_h[prof->cnt][tun] = entry_h;
8189 }
8190
8191 /* store VSI for filter replay and delete */
8192 prof->vsi_h[prof->cnt] = vsi->idx;
8193 prof->cnt++;
8194
8195 added = true;
8196 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8197 flow);
8198 }
8199
8200 if (!added)
8201 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8202
8203 return 0;
8204 }
8205
8206 /**
8207 * ice_add_channel - add a channel by adding VSI
8208 * @pf: ptr to PF device
8209 * @sw_id: underlying HW switching element ID
8210 * @ch: ptr to channel structure
8211 *
8212 * Add a channel (VSI) using add_vsi and queue_map
8213 */
8214 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8215 {
8216 struct device *dev = ice_pf_to_dev(pf);
8217 struct ice_vsi *vsi;
8218
8219 if (ch->type != ICE_VSI_CHNL) {
8220 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8221 return -EINVAL;
8222 }
8223
8224 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8225 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8226 dev_err(dev, "create chnl VSI failure\n");
8227 return -EINVAL;
8228 }
8229
8230 ice_add_vsi_to_fdir(pf, vsi);
8231
8232 ch->sw_id = sw_id;
8233 ch->vsi_num = vsi->vsi_num;
8234 ch->info.mapping_flags = vsi->info.mapping_flags;
8235 ch->ch_vsi = vsi;
8236 /* set the back pointer of channel for newly created VSI */
8237 vsi->ch = ch;
8238
8239 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8240 sizeof(vsi->info.q_mapping));
8241 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8242 sizeof(vsi->info.tc_mapping));
8243
8244 return 0;
8245 }
8246
8247 /**
8248 * ice_chnl_cfg_res - configure channel specific resources
8249 * @vsi: the VSI being setup
8250 * @ch: ptr to channel structure
8251 *
8252 * Configure channel specific resources such as rings and vectors.
8253 */
8254 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8255 {
8256 int i;
8257
8258 for (i = 0; i < ch->num_txq; i++) {
8259 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8260 struct ice_ring_container *rc;
8261 struct ice_tx_ring *tx_ring;
8262 struct ice_rx_ring *rx_ring;
8263
8264 tx_ring = vsi->tx_rings[ch->base_q + i];
8265 rx_ring = vsi->rx_rings[ch->base_q + i];
8266 if (!tx_ring || !rx_ring)
8267 continue;
8268
8269 /* setup ring being channel enabled */
8270 tx_ring->ch = ch;
8271 rx_ring->ch = ch;
8272
8273 /* following code block sets up vector specific attributes */
8274 tx_q_vector = tx_ring->q_vector;
8275 rx_q_vector = rx_ring->q_vector;
8276 if (!tx_q_vector && !rx_q_vector)
8277 continue;
8278
8279 if (tx_q_vector) {
8280 tx_q_vector->ch = ch;
8281 /* setup Tx and Rx ITR setting if DIM is off */
8282 rc = &tx_q_vector->tx;
8283 if (!ITR_IS_DYNAMIC(rc))
8284 ice_write_itr(rc, rc->itr_setting);
8285 }
8286 if (rx_q_vector) {
8287 rx_q_vector->ch = ch;
8288 /* setup Tx and Rx ITR setting if DIM is off */
8289 rc = &rx_q_vector->rx;
8290 if (!ITR_IS_DYNAMIC(rc))
8291 ice_write_itr(rc, rc->itr_setting);
8292 }
8293 }
8294
8295 /* it is safe to assume that, if the channel has a non-zero num_txq or
8296 * num_rxq, the GLINT_ITR register has been written to perform an
8297 * in-context update, hence perform a flush
8298 */
8299 if (ch->num_txq || ch->num_rxq)
8300 ice_flush(&vsi->back->hw);
8301 }
8302
8303 /**
8304 * ice_cfg_chnl_all_res - configure channel resources
8305 * @vsi: ptr to the main VSI
8306 * @ch: ptr to channel structure
8307 *
8308 * This function configures channel specific resources such as flow-director
8309 * counter index, and other resources such as queues, vectors, ITR settings
8310 */
8311 static void
8312 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8313 {
8314 /* configure channel (aka ADQ) resources such as queues, vectors,
8315 * ITR settings for channel specific vectors and anything else
8316 */
8317 ice_chnl_cfg_res(vsi, ch);
8318 }
8319
8320 /**
8321 * ice_setup_hw_channel - setup new channel
8322 * @pf: ptr to PF device
8323 * @vsi: the VSI being setup
8324 * @ch: ptr to channel structure
8325 * @sw_id: underlying HW switching element ID
8326 * @type: type of channel to be created (VMDq2/VF)
8327 *
8328 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8329 * and configures Tx rings accordingly
8330 */
8331 static int
8332 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8333 struct ice_channel *ch, u16 sw_id, u8 type)
8334 {
8335 struct device *dev = ice_pf_to_dev(pf);
8336 int ret;
8337
8338 ch->base_q = vsi->next_base_q;
8339 ch->type = type;
8340
8341 ret = ice_add_channel(pf, sw_id, ch);
8342 if (ret) {
8343 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8344 return ret;
8345 }
8346
8347 /* configure/setup ADQ specific resources */
8348 ice_cfg_chnl_all_res(vsi, ch);
8349
8350 /* make sure to update the next_base_q so that subsequent channels'
8351 * (aka ADQ) VSI queue maps are correct
8352 */
8353 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8354 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8355 ch->num_rxq);
8356
8357 return 0;
8358 }
8359
8360 /**
8361 * ice_setup_channel - setup new channel using uplink element
8362 * @pf: ptr to PF device
8363 * @vsi: the VSI being setup
8364 * @ch: ptr to channel structure
8365 *
8366 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8367 * and uplink switching element
8368 */
8369 static bool
8370 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8371 struct ice_channel *ch)
8372 {
8373 struct device *dev = ice_pf_to_dev(pf);
8374 u16 sw_id;
8375 int ret;
8376
8377 if (vsi->type != ICE_VSI_PF) {
8378 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8379 return false;
8380 }
8381
8382 sw_id = pf->first_sw->sw_id;
8383
8384 /* create channel (VSI) */
8385 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8386 if (ret) {
8387 dev_err(dev, "failed to setup hw_channel\n");
8388 return false;
8389 }
8390 dev_dbg(dev, "successfully created channel()\n");
8391
8392 return ch->ch_vsi ? true : false;
8393 }
8394
8395 /**
8396 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8397 * @vsi: VSI to be configured
8398 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8399 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8400 */
8401 static int
8402 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8403 {
8404 int err;
8405
8406 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8407 if (err)
8408 return err;
8409
8410 return ice_set_max_bw_limit(vsi, max_tx_rate);
8411 }
8412
8413 /**
8414 * ice_create_q_channel - function to create channel
8415 * @vsi: VSI to be configured
8416 * @ch: ptr to channel (it contains channel specific params)
8417 *
8418 * This function creates a channel (VSI) using the num_queues specified by
8419 * the user and reconfigures RSS if needed.
8420 */
8421 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8422 {
8423 struct ice_pf *pf = vsi->back;
8424 struct device *dev;
8425
8426 if (!ch)
8427 return -EINVAL;
8428
8429 dev = ice_pf_to_dev(pf);
8430 if (!ch->num_txq || !ch->num_rxq) {
8431 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8432 return -EINVAL;
8433 }
8434
8435 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8436 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8437 vsi->cnt_q_avail, ch->num_txq);
8438 return -EINVAL;
8439 }
8440
8441 if (!ice_setup_channel(pf, vsi, ch)) {
8442 dev_info(dev, "Failed to setup channel\n");
8443 return -EINVAL;
8444 }
8445 /* configure BW rate limit */
8446 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8447 int ret;
8448
8449 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8450 ch->min_tx_rate);
8451 if (ret)
8452 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8453 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8454 else
8455 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8456 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8457 }
8458
8459 vsi->cnt_q_avail -= ch->num_txq;
8460
8461 return 0;
8462 }
8463
8464 /**
8465 * ice_rem_all_chnl_fltrs - removes all channel filters
8466 * @pf: ptr to PF; TC-flower based filters are tracked at the PF level
8467 *
8468 * Remove all advanced switch filters only if they are channel specific
8469 * tc-flower based filters
8470 */
8471 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8472 {
8473 struct ice_tc_flower_fltr *fltr;
8474 struct hlist_node *node;
8475
8476 /* to remove all channel filters, iterate an ordered list of filters */
8477 hlist_for_each_entry_safe(fltr, node,
8478 &pf->tc_flower_fltr_list,
8479 tc_flower_node) {
8480 struct ice_rule_query_data rule;
8481 int status;
8482
8483 /* for now process only channel specific filters */
8484 if (!ice_is_chnl_fltr(fltr))
8485 continue;
8486
8487 rule.rid = fltr->rid;
8488 rule.rule_id = fltr->rule_id;
8489 rule.vsi_handle = fltr->dest_vsi_handle;
8490 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8491 if (status) {
8492 if (status == -ENOENT)
8493 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8494 rule.rule_id);
8495 else
8496 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8497 status);
8498 } else if (fltr->dest_vsi) {
8499 /* update advanced switch filter count */
8500 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8501 u32 flags = fltr->flags;
8502
8503 fltr->dest_vsi->num_chnl_fltr--;
8504 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8505 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8506 pf->num_dmac_chnl_fltrs--;
8507 }
8508 }
8509
8510 hlist_del(&fltr->tc_flower_node);
8511 kfree(fltr);
8512 }
8513 }
8514
8515 /**
8516 * ice_remove_q_channels - Remove queue channels for the TCs
8517 * @vsi: VSI to be configured
8518 * @rem_fltr: delete advanced switch filter or not
8519 *
8520 * Remove queue channels for the TCs
8521 */
8522 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8523 {
8524 struct ice_channel *ch, *ch_tmp;
8525 struct ice_pf *pf = vsi->back;
8526 int i;
8527
8528 /* remove all tc-flower based filters if they are channel filters only */
8529 if (rem_fltr)
8530 ice_rem_all_chnl_fltrs(pf);
8531
8532 /* remove ntuple filters since queue configuration is being changed */
8533 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8534 struct ice_hw *hw = &pf->hw;
8535
8536 mutex_lock(&hw->fdir_fltr_lock);
8537 ice_fdir_del_all_fltrs(vsi);
8538 mutex_unlock(&hw->fdir_fltr_lock);
8539 }
8540
8541 /* perform cleanup for channels if they exist */
8542 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8543 struct ice_vsi *ch_vsi;
8544
8545 list_del(&ch->list);
8546 ch_vsi = ch->ch_vsi;
8547 if (!ch_vsi) {
8548 kfree(ch);
8549 continue;
8550 }
8551
8552 /* Reset queue contexts */
8553 for (i = 0; i < ch->num_rxq; i++) {
8554 struct ice_tx_ring *tx_ring;
8555 struct ice_rx_ring *rx_ring;
8556
8557 tx_ring = vsi->tx_rings[ch->base_q + i];
8558 rx_ring = vsi->rx_rings[ch->base_q + i];
8559 if (tx_ring) {
8560 tx_ring->ch = NULL;
8561 if (tx_ring->q_vector)
8562 tx_ring->q_vector->ch = NULL;
8563 }
8564 if (rx_ring) {
8565 rx_ring->ch = NULL;
8566 if (rx_ring->q_vector)
8567 rx_ring->q_vector->ch = NULL;
8568 }
8569 }
8570
8571 /* Release FD resources for the channel VSI */
8572 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8573
8574 /* clear the VSI from scheduler tree */
8575 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8576
8577 /* Delete VSI from FW, PF and HW VSI arrays */
8578 ice_vsi_delete(ch->ch_vsi);
8579
8580 /* free the channel */
8581 kfree(ch);
8582 }
8583
8584 /* clear the channel VSI map which is stored in main VSI */
8585 ice_for_each_chnl_tc(i)
8586 vsi->tc_map_vsi[i] = NULL;
8587
8588 /* reset main VSI's all TC information */
8589 vsi->all_enatc = 0;
8590 vsi->all_numtc = 0;
8591 }
8592
8593 /**
8594 * ice_rebuild_channels - rebuild channels
8595 * @pf: ptr to PF
8596 *
8597 * Recreate channel VSIs and replay filters
8598 */
8599 static int ice_rebuild_channels(struct ice_pf *pf)
8600 {
8601 struct device *dev = ice_pf_to_dev(pf);
8602 struct ice_vsi *main_vsi;
8603 bool rem_adv_fltr = true;
8604 struct ice_channel *ch;
8605 struct ice_vsi *vsi;
8606 int tc_idx = 1;
8607 int i, err;
8608
8609 main_vsi = ice_get_main_vsi(pf);
8610 if (!main_vsi)
8611 return 0;
8612
8613 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8614 main_vsi->old_numtc == 1)
8615 return 0; /* nothing to be done */
8616
8617 /* reconfigure main VSI based on old value of TC and cached values
8618 * for MQPRIO opts
8619 */
8620 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8621 if (err) {
8622 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8623 main_vsi->old_ena_tc, main_vsi->vsi_num);
8624 return err;
8625 }
8626
8627 /* rebuild ADQ VSIs */
8628 ice_for_each_vsi(pf, i) {
8629 enum ice_vsi_type type;
8630
8631 vsi = pf->vsi[i];
8632 if (!vsi || vsi->type != ICE_VSI_CHNL)
8633 continue;
8634
8635 type = vsi->type;
8636
8637 /* rebuild ADQ VSI */
8638 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8639 if (err) {
8640 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8641 ice_vsi_type_str(type), vsi->idx, err);
8642 goto cleanup;
8643 }
8644
8645 /* Re-map HW VSI number, using the VSI handle that is validated by
8646 * the ice_replay_vsi() call below
8647 */
8648 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8649
8650 /* replay filters for the VSI */
8651 err = ice_replay_vsi(&pf->hw, vsi->idx);
8652 if (err) {
8653 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8654 ice_vsi_type_str(type), err, vsi->idx);
8655 rem_adv_fltr = false;
8656 goto cleanup;
8657 }
8658 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8659 ice_vsi_type_str(type), vsi->idx);
8660
8661 /* store ADQ VSI at correct TC index in main VSI's
8662 * map of TC to VSI
8663 */
8664 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8665 }
8666
8667 /* ADQ VSI(s) have been rebuilt successfully, so set up the
8668 * channels for the main VSI's Tx and Rx rings
8669 */
8670 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8671 struct ice_vsi *ch_vsi;
8672
8673 ch_vsi = ch->ch_vsi;
8674 if (!ch_vsi)
8675 continue;
8676
8677 /* reconfig channel resources */
8678 ice_cfg_chnl_all_res(main_vsi, ch);
8679
8680 /* replay BW rate limit if it is non-zero */
8681 if (!ch->max_tx_rate && !ch->min_tx_rate)
8682 continue;
8683
8684 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8685 ch->min_tx_rate);
8686 if (err)
8687 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8688 err, ch->max_tx_rate, ch->min_tx_rate,
8689 ch_vsi->vsi_num);
8690 else
8691 dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8692 ch->max_tx_rate, ch->min_tx_rate,
8693 ch_vsi->vsi_num);
8694 }
8695
8696 /* reconfig RSS for main VSI */
8697 if (main_vsi->ch_rss_size)
8698 ice_vsi_cfg_rss_lut_key(main_vsi);
8699
8700 return 0;
8701
8702 cleanup:
8703 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8704 return err;
8705 }
8706
8707 /**
8708 * ice_create_q_channels - Add queue channel for the given TCs
8709 * @vsi: VSI to be configured
8710 *
8711 * Configures queue channel mapping to the given TCs
8712 */
8713 static int ice_create_q_channels(struct ice_vsi *vsi)
8714 {
8715 struct ice_pf *pf = vsi->back;
8716 struct ice_channel *ch;
8717 int ret = 0, i;
8718
8719 ice_for_each_chnl_tc(i) {
8720 if (!(vsi->all_enatc & BIT(i)))
8721 continue;
8722
8723 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8724 if (!ch) {
8725 ret = -ENOMEM;
8726 goto err_free;
8727 }
8728 INIT_LIST_HEAD(&ch->list);
8729 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8730 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8731 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8732 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8733 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8734
8735 /* convert to Kbits/s */
8736 if (ch->max_tx_rate)
8737 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8738 ICE_BW_KBPS_DIVISOR);
8739 if (ch->min_tx_rate)
8740 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8741 ICE_BW_KBPS_DIVISOR);
8742
8743 ret = ice_create_q_channel(vsi, ch);
8744 if (ret) {
8745 dev_err(ice_pf_to_dev(pf),
8746 "failed creating channel TC:%d\n", i);
8747 kfree(ch);
8748 goto err_free;
8749 }
8750 list_add_tail(&ch->list, &vsi->ch_list);
8751 vsi->tc_map_vsi[i] = ch->ch_vsi;
8752 dev_dbg(ice_pf_to_dev(pf),
8753 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8754 }
8755 return 0;
8756
8757 err_free:
8758 ice_remove_q_channels(vsi, false);
8759
8760 return ret;
8761 }
8762
8763 /**
8764 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8765 * @netdev: net device to configure
8766 * @type_data: TC offload data
8767 */
8768 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8769 {
8770 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8771 struct ice_netdev_priv *np = netdev_priv(netdev);
8772 struct ice_vsi *vsi = np->vsi;
8773 struct ice_pf *pf = vsi->back;
8774 u16 mode, ena_tc_qdisc = 0;
8775 int cur_txq, cur_rxq;
8776 u8 hw = 0, num_tcf;
8777 struct device *dev;
8778 int ret, i;
8779
8780 dev = ice_pf_to_dev(pf);
8781 num_tcf = mqprio_qopt->qopt.num_tc;
8782 hw = mqprio_qopt->qopt.hw;
8783 mode = mqprio_qopt->mode;
8784 if (!hw) {
8785 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8786 vsi->ch_rss_size = 0;
8787 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8788 goto config_tcf;
8789 }
8790
8791 /* Generate queue region map for number of TCF requested */
8792 for (i = 0; i < num_tcf; i++)
8793 ena_tc_qdisc |= BIT(i);
8794
8795 switch (mode) {
8796 case TC_MQPRIO_MODE_CHANNEL:
8797
8798 if (pf->hw.port_info->is_custom_tx_enabled) {
8799 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8800 return -EBUSY;
8801 }
8802 ice_tear_down_devlink_rate_tree(pf);
8803
8804 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8805 if (ret) {
8806 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8807 ret);
8808 return ret;
8809 }
8810 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8811 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8812 /* don't assume the state of hw_tc_offload during driver load
8813 * and set the flag for the TC flower filter if hw_tc_offload is
8814 * already ON
8815 */
8816 if (vsi->netdev->features & NETIF_F_HW_TC)
8817 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8818 break;
8819 default:
8820 return -EINVAL;
8821 }
8822
8823 config_tcf:
8824
8825 /* Requesting same TCF configuration as already enabled */
8826 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8827 mode != TC_MQPRIO_MODE_CHANNEL)
8828 return 0;
8829
8830 /* Pause VSI queues */
8831 ice_dis_vsi(vsi, true);
8832
8833 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8834 ice_remove_q_channels(vsi, true);
8835
8836 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8837 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8838 num_online_cpus());
8839 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8840 num_online_cpus());
8841 } else {
8842 /* logic to rebuild VSI, same like ethtool -L */
8843 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8844
8845 for (i = 0; i < num_tcf; i++) {
8846 if (!(ena_tc_qdisc & BIT(i)))
8847 continue;
8848
8849 offset = vsi->mqprio_qopt.qopt.offset[i];
8850 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8851 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8852 }
8853 vsi->req_txq = offset + qcount_tx;
8854 vsi->req_rxq = offset + qcount_rx;
8855
8856 /* store away the original rss_size info so that it gets reused
8857 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
8858 * determine what the rss_size for the main VSI should be
8859 */
8860 vsi->orig_rss_size = vsi->rss_size;
8861 }
8862
8863 /* save current values of Tx and Rx queues before calling VSI rebuild
8864 * for fallback option
8865 */
8866 cur_txq = vsi->num_txq;
8867 cur_rxq = vsi->num_rxq;
8868
8869 /* proceed with rebuild main VSI using correct number of queues */
8870 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8871 if (ret) {
8872 /* fallback to current number of queues */
8873 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8874 vsi->req_txq = cur_txq;
8875 vsi->req_rxq = cur_rxq;
8876 clear_bit(ICE_RESET_FAILED, pf->state);
8877 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8878 dev_err(dev, "Rebuild of main VSI failed again\n");
8879 return ret;
8880 }
8881 }
8882
8883 vsi->all_numtc = num_tcf;
8884 vsi->all_enatc = ena_tc_qdisc;
8885 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8886 if (ret) {
8887 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8888 vsi->vsi_num);
8889 goto exit;
8890 }
8891
8892 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8893 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8894 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8895
8896 /* set TC0 rate limit if specified */
8897 if (max_tx_rate || min_tx_rate) {
8898 /* convert to Kbits/s */
8899 if (max_tx_rate)
8900 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8901 if (min_tx_rate)
8902 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8903
8904 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8905 if (!ret) {
8906 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8907 max_tx_rate, min_tx_rate, vsi->vsi_num);
8908 } else {
8909 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8910 max_tx_rate, min_tx_rate, vsi->vsi_num);
8911 goto exit;
8912 }
8913 }
8914 ret = ice_create_q_channels(vsi);
8915 if (ret) {
8916 netdev_err(netdev, "failed configuring queue channels\n");
8917 goto exit;
8918 } else {
8919 netdev_dbg(netdev, "successfully configured channels\n");
8920 }
8921 }
8922
8923 if (vsi->ch_rss_size)
8924 ice_vsi_cfg_rss_lut_key(vsi);
8925
8926 exit:
8927 /* if error, reset the all_numtc and all_enatc */
8928 if (ret) {
8929 vsi->all_numtc = 0;
8930 vsi->all_enatc = 0;
8931 }
8932 /* resume VSI */
8933 ice_ena_vsi(vsi, true);
8934
8935 return ret;
8936 }
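/* Illustrative teardown (placeholder interface name): deleting the qdisc
 * re-enters this handler via the !hw path, which clears ICE_FLAG_TC_MQPRIO
 * and removes the channel VSIs:
 *
 *	tc qdisc del dev eth0 root
 */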
8937
8938 static LIST_HEAD(ice_block_cb_list);
8939
8940 static int
8941 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8942 void *type_data)
8943 {
8944 struct ice_netdev_priv *np = netdev_priv(netdev);
8945 struct ice_pf *pf = np->vsi->back;
8946 bool locked = false;
8947 int err;
8948
8949 switch (type) {
8950 case TC_SETUP_BLOCK:
8951 return flow_block_cb_setup_simple(type_data,
8952 &ice_block_cb_list,
8953 ice_setup_tc_block_cb,
8954 np, np, true);
8955 case TC_SETUP_QDISC_MQPRIO:
8956 if (ice_is_eswitch_mode_switchdev(pf)) {
8957 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
8958 return -EOPNOTSUPP;
8959 }
8960
8961 if (pf->adev) {
8962 mutex_lock(&pf->adev_mutex);
8963 device_lock(&pf->adev->dev);
8964 locked = true;
8965 if (pf->adev->dev.driver) {
8966 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
8967 err = -EBUSY;
8968 goto adev_unlock;
8969 }
8970 }
8971
8972 /* setup traffic classifier for receive side */
8973 mutex_lock(&pf->tc_mutex);
8974 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8975 mutex_unlock(&pf->tc_mutex);
8976
8977 adev_unlock:
8978 if (locked) {
8979 device_unlock(&pf->adev->dev);
8980 mutex_unlock(&pf->adev_mutex);
8981 }
8982 return err;
8983 default:
8984 return -EOPNOTSUPP;
8985 }
}

static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
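
/* A usage sketch with assumed command syntax, not part of the driver:
 * the indirect block plumbing above is what lets a flower filter that
 * userspace installs on a tunnel netdev we do not own get offloaded
 * through this PF, e.g.:
 *
 *   tc filter add dev vxlan0 ingress protocol ip flower \
 *      enc_dst_port 4789 dst_ip 192.168.1.10 action drop
 */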

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);

	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}
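
/* A call-path sketch (generic netdev core behavior, not ice-specific):
 *
 *   ip link set dev eth0 up
 *     -> dev_open() -> __dev_open() -> ops->ndo_open() == ice_open()
 */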

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be called directly except
 * by ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set the PHY up if media is present; otherwise turn the PHY off */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnel information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success; -EBUSY while a reset is in progress, or -EIO if
 * forcing the physical link down fails.
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			if (link_err == -ENOMEDIUM)
				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
					    vsi->vsi_num);
			else
				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
					   vsi->vsi_num, link_err);

			ice_vsi_close(vsi);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}
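
/* A call-path sketch (generic netdev core behavior, not ice-specific),
 * mirroring ice_open() above:
 *
 *   ip link set dev eth0 down
 *     -> dev_close() -> __dev_close_many() -> ops->ndo_stop() == ice_stop()
 */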

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum offload nor GSO
	 * is being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is less than 64 bytes; if it is,
	 * drop the GSO feature for this frame.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* This must work for VXLAN frames as well as IPIP/SIT frames;
		 * for IPIP frames the transport header pointer is after the
		 * inner header, so make sure this is a GRE or UDP_TUNNEL
		 * frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
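
/* A sketch of the reasoning behind the "len & 0x1" checks above (our
 * reading, assuming the Tx descriptor encodes header lengths in 2-byte
 * words): an odd byte count cannot be expressed, so such frames fall
 * back to software checksum/GSO, e.g.:
 *
 *   maclen = 14 bytes -> 7 words   -> representable
 *   maclen = 15 bytes -> 7.5 words -> not representable, features cleared
 */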

static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};
