// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
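
/* Worked example (illustrative only): with ring->count = 256,
 * next_to_clean = 250 and next_to_use = 10, the ring has wrapped, so the
 * pending count is 10 + 256 - 250 = 16 descriptors not yet cleaned.
 */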

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
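
/* Usage sketch (illustrative; ice_vsi_sync_fltr() below is the real caller):
 * the in-kernel sync helpers invoke this callback once per new address while
 * the netdev address list lock is held, e.g.:
 *
 *	netif_addr_lock_bh(netdev);
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	netif_addr_unlock_bh(netdev);
 */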

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	return status;
}
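
/* Example (illustrative): a caller enabling multicast promiscuous reception
 * passes ICE_MCAST_PROMISC_BITS, e.g.
 * ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS); on a VSI with non-zero VLANs
 * the helpers above also fold in ICE_PROMISC_VLAN_RX/TX so the promiscuous
 * rules are programmed per VLAN.
 */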

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again. For example after the reset. The 'recp_created'
 * flag prevents that from happening and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_qs_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on the safe side, reset orig_rss_size so that the normal
	 * flow of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset types, channel rebuild is not
			 * supported, hence reset the needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
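
/* Note on ordering (one reading of the flow above): auxiliary devices and
 * VFs are quiesced first, VSIs and scheduler state are torn down next, and
 * the control queues are shut down last, so AdminQ commands issued during
 * the earlier teardown steps can still complete.
 */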

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set,
	 * prepare for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it, as indicated by ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set, so prepare for the reset now), poll for reset done,
	 * rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be logged with a debug
 * message, and the driver should continue with its rebuild/re-enable flow.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
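
/* Resulting LLDPDU layout (illustrative summary of the buffer built above):
 *
 *	[ETS CFG TLV]  all UPs mapped to TC 0, TC 0 given 100% bandwidth
 *	[ETS REC TLV]  same mapping and bandwidth recommendation
 *	[PFC CFG TLV]  PFC capability advertised, no priorities enabled
 *
 * i.e. a single-TC, no-PFC default configuration.
 */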

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other bookkeeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
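
/* Usage sketch (illustrative, not a caller from this file): wait up to one
 * second for a firmware event, capturing only the descriptor:
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	err = ice_aq_wait_for_event(pf, opcode, HZ, &event);
 *	if (err == -ETIMEDOUT)
 *		... firmware never sent the event ...
 *
 * To capture the message payload as well, point event.msg_buf at an
 * allocated buffer and set event.buf_len before calling.
 */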

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
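
/* Note (one reading): the head register is advanced by firmware as it posts
 * receive queue entries, so comparing it against the driver's next_to_clean
 * index cheaply detects messages that arrived after the last clean pass;
 * the subtasks below use this to re-check before exiting.
 */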

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume flows (e.g. the WoL
 * scenario).
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
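
/* Note: the timer callback only re-arms itself and schedules the service
 * task; all real work then runs in process context on ice_wq, where the
 * subtasks are free to sleep (mutexes, AdminQ waits, etc.).
 */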

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 vf->vf_id);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
			}
		}
	}
	mutex_unlock(&pf->vfs.table_lock);

	ice_print_vfs_mdd_events(pf);
}
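
/* Administrative example (illustrative): the automatic VF reset behavior
 * above is opt-in via the ethtool private flag named in the kernel-doc,
 * e.g.:
 *
 *	# ethtool --set-priv-flags <iface> mdd-auto-reset-vf on
 *
 * which sets ICE_FLAG_MDD_AUTO_RESET_VF so the PF resets a VF whose Rx
 * queue was disabled by an MDD event.
 */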
1884 */ 1885 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); 1886 if (!cfg) { 1887 retcode = -ENOMEM; 1888 goto out; 1889 } 1890 1891 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1892 if (link_up) 1893 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 1894 else 1895 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 1896 1897 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 1898 if (retcode) { 1899 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 1900 vsi->vsi_num, retcode); 1901 retcode = -EIO; 1902 } 1903 1904 kfree(cfg); 1905 out: 1906 kfree(pcaps); 1907 return retcode; 1908 } 1909 1910 /** 1911 * ice_init_nvm_phy_type - Initialize the NVM PHY type 1912 * @pi: port info structure 1913 * 1914 * Initialize nvm_phy_type_[low|high] for link lenient mode support 1915 */ 1916 static int ice_init_nvm_phy_type(struct ice_port_info *pi) 1917 { 1918 struct ice_aqc_get_phy_caps_data *pcaps; 1919 struct ice_pf *pf = pi->hw->back; 1920 int err; 1921 1922 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1923 if (!pcaps) 1924 return -ENOMEM; 1925 1926 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, 1927 pcaps, NULL); 1928 1929 if (err) { 1930 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 1931 goto out; 1932 } 1933 1934 pf->nvm_phy_type_hi = pcaps->phy_type_high; 1935 pf->nvm_phy_type_lo = pcaps->phy_type_low; 1936 1937 out: 1938 kfree(pcaps); 1939 return err; 1940 } 1941 1942 /** 1943 * ice_init_link_dflt_override - Initialize link default override 1944 * @pi: port info structure 1945 * 1946 * Initialize link default override and PHY total port shutdown during probe 1947 */ 1948 static void ice_init_link_dflt_override(struct ice_port_info *pi) 1949 { 1950 struct ice_link_default_override_tlv *ldo; 1951 struct ice_pf *pf = pi->hw->back; 1952 1953 ldo = &pf->link_dflt_override; 1954 if (ice_get_link_default_override(ldo, pi)) 1955 return; 1956 1957 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 1958 return; 1959 1960 /* Enable Total Port Shutdown (override/replace link-down-on-close 1961 * ethtool private flag) for ports with Port Disable bit set. 1962 */ 1963 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 1964 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 1965 } 1966 1967 /** 1968 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 1969 * @pi: port info structure 1970 * 1971 * If default override is enabled, initialize the user PHY cfg speed and FEC 1972 * settings using the default override mask from the NVM. 1973 * 1974 * The PHY should only be configured with the default override settings the 1975 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state 1976 * is used to indicate that the user PHY cfg default override is initialized 1977 * and the PHY has not been configured with the default override settings. The 1978 * state is set here, and cleared in ice_configure_phy the first time the PHY is 1979 * configured. 1980 * 1981 * This function should be called only if the FW doesn't support default 1982 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. 
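 *
 * For reference, the expected call pattern from ice_init_phy_user_cfg()
 * is:
 *
 *	if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
 *	    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN))
 *		ice_init_phy_cfg_dflt_override(pi);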
1983 */ 1984 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) 1985 { 1986 struct ice_link_default_override_tlv *ldo; 1987 struct ice_aqc_set_phy_cfg_data *cfg; 1988 struct ice_phy_info *phy = &pi->phy; 1989 struct ice_pf *pf = pi->hw->back; 1990 1991 ldo = &pf->link_dflt_override; 1992 1993 /* If link default override is enabled, use to mask NVM PHY capabilities 1994 * for speed and FEC default configuration. 1995 */ 1996 cfg = &phy->curr_user_phy_cfg; 1997 1998 if (ldo->phy_type_low || ldo->phy_type_high) { 1999 cfg->phy_type_low = pf->nvm_phy_type_lo & 2000 cpu_to_le64(ldo->phy_type_low); 2001 cfg->phy_type_high = pf->nvm_phy_type_hi & 2002 cpu_to_le64(ldo->phy_type_high); 2003 } 2004 cfg->link_fec_opt = ldo->fec_options; 2005 phy->curr_user_fec_req = ICE_FEC_AUTO; 2006 2007 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); 2008 } 2009 2010 /** 2011 * ice_init_phy_user_cfg - Initialize the PHY user configuration 2012 * @pi: port info structure 2013 * 2014 * Initialize the current user PHY configuration, speed, FEC, and FC requested 2015 * mode to default. The PHY defaults are from get PHY capabilities topology 2016 * with media so call when media is first available. An error is returned if 2017 * called when media is not available. The PHY initialization completed state is 2018 * set here. 2019 * 2020 * These configurations are used when setting PHY 2021 * configuration. The user PHY configuration is updated on set PHY 2022 * configuration. Returns 0 on success, negative on failure 2023 */ 2024 static int ice_init_phy_user_cfg(struct ice_port_info *pi) 2025 { 2026 struct ice_aqc_get_phy_caps_data *pcaps; 2027 struct ice_phy_info *phy = &pi->phy; 2028 struct ice_pf *pf = pi->hw->back; 2029 int err; 2030 2031 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 2032 return -EIO; 2033 2034 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2035 if (!pcaps) 2036 return -ENOMEM; 2037 2038 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 2039 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 2040 pcaps, NULL); 2041 else 2042 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2043 pcaps, NULL); 2044 if (err) { 2045 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 2046 goto err_out; 2047 } 2048 2049 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); 2050 2051 /* check if lenient mode is supported and enabled */ 2052 if (ice_fw_supports_link_override(pi->hw) && 2053 !(pcaps->module_compliance_enforcement & 2054 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { 2055 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); 2056 2057 /* if the FW supports default PHY configuration mode, then the driver 2058 * does not have to apply link override settings. 
 * If not, initialize user PHY configuration with link override values
 */
	if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
	    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
		ice_init_phy_cfg_dflt_override(pi);
		goto out;
	}
	}

	/* if link default override is not enabled, set user flow control and
	 * FEC settings based on what get_phy_caps returned
	 */
	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
						      pcaps->link_fec_options);
	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

out:
	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
	kfree(pcaps);
	return err;
}

/**
 * ice_configure_phy - configure PHY
 * @vsi: VSI of PHY
 *
 * Set the PHY configuration. If the current PHY configuration is the same as
 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
 * configure the PHY based on the get PHY capabilities for topology with media.
 */
static int ice_configure_phy(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = vsi->back;
	int err;

	/* Ensure we have media as we cannot configure a medialess port */
	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EPERM;

	ice_print_topo_conflict(vsi);

	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
		return -EPERM;

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
		return ice_force_phys_link_state(vsi, true);

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get current PHY config */
	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				  NULL);
	if (err) {
		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
			vsi->vsi_num, err);
		goto done;
	}

	/* If PHY enable link is configured and configuration has not changed,
	 * there's nothing to do
	 */
	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
		goto done;

	/* Use PHY topology as baseline for configuration */
	memset(pcaps, 0, sizeof(*pcaps));
	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false,
					  ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
			vsi->vsi_num, err);
		goto done;
	}

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		err = -ENOMEM;
		goto done;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);

	/* Speed - If default override pending, use curr_user_phy_cfg set in
	 * ice_init_phy_cfg_dflt_override().
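	 * The pending bit is test-and-cleared below, so the override values
	 * are applied exactly once after probe.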
	 */
	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
			       vsi->back->state)) {
		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
	} else {
		u64 phy_low = 0, phy_high = 0;

		ice_update_phy_type(&phy_low, &phy_high,
				    pi->phy.curr_user_speed_req);
		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
		cfg->phy_type_high = pcaps->phy_type_high &
				     cpu_to_le64(phy_high);
	}

	/* Can't provide what was requested; use PHY capabilities */
	if (!cfg->phy_type_low && !cfg->phy_type_high) {
		cfg->phy_type_low = pcaps->phy_type_low;
		cfg->phy_type_high = pcaps->phy_type_high;
	}

	/* FEC */
	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);

	/* Can't provide what was requested; use PHY capabilities */
	if (cfg->link_fec_opt !=
	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
		cfg->link_fec_opt = pcaps->link_fec_options;
	}

	/* Flow Control - always supported; no need to check against
	 * capabilities
	 */
	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);

	/* Enable link and link update */
	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;

	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
	if (err)
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, err);

	kfree(cfg);
done:
	kfree(pcaps);
	return err;
}

/**
 * ice_check_media_subtask - Check for media
 * @pf: pointer to PF struct
 *
 * If media is available, then initialize the PHY user configuration if it has
 * not been initialized yet, and configure the PHY if the interface is up.
 */
static void ice_check_media_subtask(struct ice_pf *pf)
{
	struct ice_port_info *pi;
	struct ice_vsi *vsi;
	int err;

	/* No need to check for media if it's already present */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
		return;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Refresh link info and check if media is present */
	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err)
		return;

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
			ice_init_phy_user_cfg(pi);

		/* PHY settings are reset on media insertion, reconfigure
		 * PHY to preserve settings.
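		 * Skip the reconfiguration when the interface is down and
		 * link-down-on-close is set; the PHY is configured again
		 * when the interface is brought up.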
		 */
		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
			return;

		err = ice_configure_phy(vsi);
		if (!err)
			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		/* A Link Status Event will be generated; the event handler
		 * will complete bringing the interface up
		 */
	}
}

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(ICE_SUSPENDED, pf->state) ||
	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
		struct iidc_event *event;

		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (event) {
			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
			/* report the entire OICR value to AUX driver */
			swap(event->reg, pf->oicr_err_reg);
			ice_send_event_to_aux(pf, event);
			kfree(event);
		}
	}

	if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
		/* Plug aux device per request */
		ice_plug_aux_dev(pf);

		/* Mark plugging as done but check whether unplug was
		 * requested during ice_plug_aux_dev() call
		 * (e.g. from ice_clear_rdma_cap()) and if so then
		 * unplug the aux device again.
		 */
		if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
			ice_unplug_aux_dev(pf);
	}

	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
		struct iidc_event *event;

		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (event) {
			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
			ice_send_event_to_aux(pf, event);
			kfree(event);
		}
	}

	ice_clean_adminq_subtask(pf);
	ice_check_media_subtask(pf);
	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_watchdog_subtask(pf);

	if (ice_is_safe_mode(pf)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_process_vflr_event(pf);
	ice_clean_mailboxq_subtask(pf);
	ice_clean_sbq_subtask(pf);
	ice_sync_arfs_fltrs(pf);
	ice_flush_fdir_ctx(pf);

	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
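	 * An expiry of "jiffies" is already due, so mod_timer() below fires
	 * the timer on the next tick and the service task is re-queued
	 * almost immediately.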
2341 */ 2342 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 2343 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || 2344 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2345 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2346 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2347 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2348 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 2349 mod_timer(&pf->serv_tmr, jiffies); 2350 } 2351 2352 /** 2353 * ice_set_ctrlq_len - helper function to set controlq length 2354 * @hw: pointer to the HW instance 2355 */ 2356 static void ice_set_ctrlq_len(struct ice_hw *hw) 2357 { 2358 hw->adminq.num_rq_entries = ICE_AQ_LEN; 2359 hw->adminq.num_sq_entries = ICE_AQ_LEN; 2360 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 2361 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 2362 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; 2363 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2364 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2365 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2366 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2367 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2368 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2369 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2370 } 2371 2372 /** 2373 * ice_schedule_reset - schedule a reset 2374 * @pf: board private structure 2375 * @reset: reset being requested 2376 */ 2377 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 2378 { 2379 struct device *dev = ice_pf_to_dev(pf); 2380 2381 /* bail out if earlier reset has failed */ 2382 if (test_bit(ICE_RESET_FAILED, pf->state)) { 2383 dev_dbg(dev, "earlier reset has failed\n"); 2384 return -EIO; 2385 } 2386 /* bail if reset/recovery already in progress */ 2387 if (ice_is_reset_in_progress(pf->state)) { 2388 dev_dbg(dev, "Reset already in progress\n"); 2389 return -EBUSY; 2390 } 2391 2392 switch (reset) { 2393 case ICE_RESET_PFR: 2394 set_bit(ICE_PFR_REQ, pf->state); 2395 break; 2396 case ICE_RESET_CORER: 2397 set_bit(ICE_CORER_REQ, pf->state); 2398 break; 2399 case ICE_RESET_GLOBR: 2400 set_bit(ICE_GLOBR_REQ, pf->state); 2401 break; 2402 default: 2403 return -EINVAL; 2404 } 2405 2406 ice_service_task_schedule(pf); 2407 return 0; 2408 } 2409 2410 /** 2411 * ice_irq_affinity_notify - Callback for affinity changes 2412 * @notify: context as to what irq was changed 2413 * @mask: the new affinity mask 2414 * 2415 * This is a callback function used by the irq_set_affinity_notifier function 2416 * so that we may register to receive changes to the irq affinity masks. 2417 */ 2418 static void 2419 ice_irq_affinity_notify(struct irq_affinity_notify *notify, 2420 const cpumask_t *mask) 2421 { 2422 struct ice_q_vector *q_vector = 2423 container_of(notify, struct ice_q_vector, affinity_notify); 2424 2425 cpumask_copy(&q_vector->affinity_mask, mask); 2426 } 2427 2428 /** 2429 * ice_irq_affinity_release - Callback for affinity notifier release 2430 * @ref: internal core kernel usage 2431 * 2432 * This is a callback function used by the irq_set_affinity_notifier function 2433 * to inform the current notification subscriber that they will no longer 2434 * receive notifications. 
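 *
 * Both callbacks are installed per queue vector in ice_vsi_req_irq_msix(),
 * roughly:
 *
 *	affinity_notify->notify = ice_irq_affinity_notify;
 *	affinity_notify->release = ice_irq_affinity_release;
 *	irq_set_affinity_notifier(irq_num, affinity_notify);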
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;
	int i;

	ice_for_each_q_vector(vsi, i)
		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);

	ice_flush(hw);
	return 0;
}

/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	struct device *dev;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	dev = ice_pf_to_dev(pf);
	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.rx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.tx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
					       IRQF_SHARED, q_vector->name,
					       q_vector);
		else
			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
					       0, q_vector->name, q_vector);
		if (err) {
			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
				   err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
			struct irq_affinity_notify *affinity_notify;

			affinity_notify = &q_vector->affinity_notify;
			affinity_notify->notify = ice_irq_affinity_notify;
			affinity_notify->release = ice_irq_affinity_release;
			irq_set_affinity_notifier(irq_num, affinity_notify);
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	err = ice_set_cpu_rx_rmap(vsi);
	if (err) {
		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
			   vsi->vsi_num, ERR_PTR(err));
		goto free_q_irqs;
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
			irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		/* dev_id must match what was passed to devm_request_irq() */
		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
 * @vsi: VSI to setup Tx rings used by XDP
 *
 * Return 0 on success and negative value on error
 */
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_tx_desc *tx_desc;
	int i, j;

ice_for_each_xdp_txq(vsi, i) { 2552 u16 xdp_q_idx = vsi->alloc_txq + i; 2553 struct ice_ring_stats *ring_stats; 2554 struct ice_tx_ring *xdp_ring; 2555 2556 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); 2557 if (!xdp_ring) 2558 goto free_xdp_rings; 2559 2560 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); 2561 if (!ring_stats) { 2562 ice_free_tx_ring(xdp_ring); 2563 goto free_xdp_rings; 2564 } 2565 2566 xdp_ring->ring_stats = ring_stats; 2567 xdp_ring->q_index = xdp_q_idx; 2568 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; 2569 xdp_ring->vsi = vsi; 2570 xdp_ring->netdev = NULL; 2571 xdp_ring->dev = dev; 2572 xdp_ring->count = vsi->num_tx_desc; 2573 xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1; 2574 xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1; 2575 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); 2576 if (ice_setup_tx_ring(xdp_ring)) 2577 goto free_xdp_rings; 2578 ice_set_ring_xdp(xdp_ring); 2579 spin_lock_init(&xdp_ring->tx_lock); 2580 for (j = 0; j < xdp_ring->count; j++) { 2581 tx_desc = ICE_TX_DESC(xdp_ring, j); 2582 tx_desc->cmd_type_offset_bsz = 0; 2583 } 2584 } 2585 2586 return 0; 2587 2588 free_xdp_rings: 2589 for (; i >= 0; i--) { 2590 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { 2591 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); 2592 vsi->xdp_rings[i]->ring_stats = NULL; 2593 ice_free_tx_ring(vsi->xdp_rings[i]); 2594 } 2595 } 2596 return -ENOMEM; 2597 } 2598 2599 /** 2600 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI 2601 * @vsi: VSI to set the bpf prog on 2602 * @prog: the bpf prog pointer 2603 */ 2604 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) 2605 { 2606 struct bpf_prog *old_prog; 2607 int i; 2608 2609 old_prog = xchg(&vsi->xdp_prog, prog); 2610 if (old_prog) 2611 bpf_prog_put(old_prog); 2612 2613 ice_for_each_rxq(vsi, i) 2614 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 2615 } 2616 2617 /** 2618 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2619 * @vsi: VSI to bring up Tx rings used by XDP 2620 * @prog: bpf program that will be assigned to VSI 2621 * 2622 * Return 0 on success and negative value on error 2623 */ 2624 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) 2625 { 2626 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2627 int xdp_rings_rem = vsi->num_xdp_txq; 2628 struct ice_pf *pf = vsi->back; 2629 struct ice_qs_cfg xdp_qs_cfg = { 2630 .qs_mutex = &pf->avail_q_mutex, 2631 .pf_map = pf->avail_txqs, 2632 .pf_map_size = pf->max_pf_txqs, 2633 .q_count = vsi->num_xdp_txq, 2634 .scatter_count = ICE_MAX_SCATTER_TXQS, 2635 .vsi_map = vsi->txq_map, 2636 .vsi_map_offset = vsi->alloc_txq, 2637 .mapping_mode = ICE_VSI_MAP_CONTIG 2638 }; 2639 struct device *dev; 2640 int i, v_idx; 2641 int status; 2642 2643 dev = ice_pf_to_dev(pf); 2644 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, 2645 sizeof(*vsi->xdp_rings), GFP_KERNEL); 2646 if (!vsi->xdp_rings) 2647 return -ENOMEM; 2648 2649 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; 2650 if (__ice_vsi_get_qs(&xdp_qs_cfg)) 2651 goto err_map_xdp; 2652 2653 if (static_key_enabled(&ice_xdp_locking_key)) 2654 netdev_warn(vsi->netdev, 2655 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); 2656 2657 if (ice_xdp_alloc_setup_rings(vsi)) 2658 goto clear_xdp_rings; 2659 2660 /* follow the logic from ice_vsi_map_rings_to_vectors */ 2661 ice_for_each_q_vector(vsi, v_idx) { 2662 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2663 int xdp_rings_per_v, q_id, 
q_base; 2664 2665 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, 2666 vsi->num_q_vectors - v_idx); 2667 q_base = vsi->num_xdp_txq - xdp_rings_rem; 2668 2669 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { 2670 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; 2671 2672 xdp_ring->q_vector = q_vector; 2673 xdp_ring->next = q_vector->tx.tx_ring; 2674 q_vector->tx.tx_ring = xdp_ring; 2675 } 2676 xdp_rings_rem -= xdp_rings_per_v; 2677 } 2678 2679 ice_for_each_rxq(vsi, i) { 2680 if (static_key_enabled(&ice_xdp_locking_key)) { 2681 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; 2682 } else { 2683 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector; 2684 struct ice_tx_ring *ring; 2685 2686 ice_for_each_tx_ring(ring, q_vector->tx) { 2687 if (ice_ring_is_xdp(ring)) { 2688 vsi->rx_rings[i]->xdp_ring = ring; 2689 break; 2690 } 2691 } 2692 } 2693 ice_tx_xsk_pool(vsi, i); 2694 } 2695 2696 /* omit the scheduler update if in reset path; XDP queues will be 2697 * taken into account at the end of ice_vsi_rebuild, where 2698 * ice_cfg_vsi_lan is being called 2699 */ 2700 if (ice_is_reset_in_progress(pf->state)) 2701 return 0; 2702 2703 /* tell the Tx scheduler that right now we have 2704 * additional queues 2705 */ 2706 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2707 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; 2708 2709 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2710 max_txqs); 2711 if (status) { 2712 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", 2713 status); 2714 goto clear_xdp_rings; 2715 } 2716 2717 /* assign the prog only when it's not already present on VSI; 2718 * this flow is a subject of both ethtool -L and ndo_bpf flows; 2719 * VSI rebuild that happens under ethtool -L can expose us to 2720 * the bpf_prog refcount issues as we would be swapping same 2721 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put 2722 * on it as it would be treated as an 'old_prog'; for ndo_bpf 2723 * this is not harmful as dev_xdp_install bumps the refcount 2724 * before calling the op exposed by the driver; 2725 */ 2726 if (!ice_is_xdp_ena_vsi(vsi)) 2727 ice_vsi_assign_bpf_prog(vsi, prog); 2728 2729 return 0; 2730 clear_xdp_rings: 2731 ice_for_each_xdp_txq(vsi, i) 2732 if (vsi->xdp_rings[i]) { 2733 kfree_rcu(vsi->xdp_rings[i], rcu); 2734 vsi->xdp_rings[i] = NULL; 2735 } 2736 2737 err_map_xdp: 2738 mutex_lock(&pf->avail_q_mutex); 2739 ice_for_each_xdp_txq(vsi, i) { 2740 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2741 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2742 } 2743 mutex_unlock(&pf->avail_q_mutex); 2744 2745 devm_kfree(dev, vsi->xdp_rings); 2746 return -ENOMEM; 2747 } 2748 2749 /** 2750 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings 2751 * @vsi: VSI to remove XDP rings 2752 * 2753 * Detach XDP rings from irq vectors, clean up the PF bitmap and free 2754 * resources 2755 */ 2756 int ice_destroy_xdp_rings(struct ice_vsi *vsi) 2757 { 2758 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2759 struct ice_pf *pf = vsi->back; 2760 int i, v_idx; 2761 2762 /* q_vectors are freed in reset path so there's no point in detaching 2763 * rings; in case of rebuild being triggered not from reset bits 2764 * in pf->state won't be set, so additionally check first q_vector 2765 * against NULL 2766 */ 2767 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2768 goto free_qmap; 2769 2770 ice_for_each_q_vector(vsi, v_idx) { 2771 struct ice_q_vector *q_vector = 
vsi->q_vectors[v_idx]; 2772 struct ice_tx_ring *ring; 2773 2774 ice_for_each_tx_ring(ring, q_vector->tx) 2775 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2776 break; 2777 2778 /* restore the value of last node prior to XDP setup */ 2779 q_vector->tx.tx_ring = ring; 2780 } 2781 2782 free_qmap: 2783 mutex_lock(&pf->avail_q_mutex); 2784 ice_for_each_xdp_txq(vsi, i) { 2785 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2786 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2787 } 2788 mutex_unlock(&pf->avail_q_mutex); 2789 2790 ice_for_each_xdp_txq(vsi, i) 2791 if (vsi->xdp_rings[i]) { 2792 if (vsi->xdp_rings[i]->desc) { 2793 synchronize_rcu(); 2794 ice_free_tx_ring(vsi->xdp_rings[i]); 2795 } 2796 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); 2797 vsi->xdp_rings[i]->ring_stats = NULL; 2798 kfree_rcu(vsi->xdp_rings[i], rcu); 2799 vsi->xdp_rings[i] = NULL; 2800 } 2801 2802 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); 2803 vsi->xdp_rings = NULL; 2804 2805 if (static_key_enabled(&ice_xdp_locking_key)) 2806 static_branch_dec(&ice_xdp_locking_key); 2807 2808 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2809 return 0; 2810 2811 ice_vsi_assign_bpf_prog(vsi, NULL); 2812 2813 /* notify Tx scheduler that we destroyed XDP queues and bring 2814 * back the old number of child nodes 2815 */ 2816 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2817 max_txqs[i] = vsi->num_txq; 2818 2819 /* change number of XDP Tx queues to 0 */ 2820 vsi->num_xdp_txq = 0; 2821 2822 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2823 max_txqs); 2824 } 2825 2826 /** 2827 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI 2828 * @vsi: VSI to schedule napi on 2829 */ 2830 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) 2831 { 2832 int i; 2833 2834 ice_for_each_rxq(vsi, i) { 2835 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; 2836 2837 if (rx_ring->xsk_pool) 2838 napi_schedule(&rx_ring->q_vector->napi); 2839 } 2840 } 2841 2842 /** 2843 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have 2844 * @vsi: VSI to determine the count of XDP Tx qs 2845 * 2846 * returns 0 if Tx qs count is higher than at least half of CPU count, 2847 * -ENOMEM otherwise 2848 */ 2849 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) 2850 { 2851 u16 avail = ice_get_avail_txq_count(vsi->back); 2852 u16 cpus = num_possible_cpus(); 2853 2854 if (avail < cpus / 2) 2855 return -ENOMEM; 2856 2857 vsi->num_xdp_txq = min_t(u16, avail, cpus); 2858 2859 if (vsi->num_xdp_txq < cpus) 2860 static_branch_inc(&ice_xdp_locking_key); 2861 2862 return 0; 2863 } 2864 2865 /** 2866 * ice_xdp_setup_prog - Add or remove XDP eBPF program 2867 * @vsi: VSI to setup XDP for 2868 * @prog: XDP program 2869 * @extack: netlink extended ack 2870 */ 2871 static int 2872 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, 2873 struct netlink_ext_ack *extack) 2874 { 2875 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; 2876 bool if_running = netif_running(vsi->netdev); 2877 int ret = 0, xdp_ring_err = 0; 2878 2879 if (frame_size > vsi->rx_buf_len) { 2880 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); 2881 return -EOPNOTSUPP; 2882 } 2883 2884 /* need to stop netdev while setting up the program for Rx rings */ 2885 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 2886 ret = ice_down(vsi); 2887 if (ret) { 2888 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); 2889 return ret; 2890 } 2891 } 2892 2893 if (!ice_is_xdp_ena_vsi(vsi) && 
	    prog) {
		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
		if (xdp_ring_err) {
			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
		} else {
			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
			if (xdp_ring_err)
				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
		}
		/* reallocate Rx queues that are used for zero-copy */
		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
		xdp_ring_err = ice_destroy_xdp_rings(vsi);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
		/* reallocate Rx queues that were used for zero-copy */
		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
	} else {
		/* safe to call even when prog == vsi->xdp_prog as
		 * dev_xdp_install in net/core/dev.c incremented prog's
		 * refcount so corresponding bpf_prog_put won't cause
		 * underflow
		 */
		ice_vsi_assign_bpf_prog(vsi, prog);
	}

	if (if_running)
		ret = ice_up(vsi);

	if (!ret && prog)
		ice_vsi_rx_napi_schedule(vsi);

	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}

/**
 * ice_xdp_safe_mode - XDP handler for safe mode
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
			     struct netdev_bpf *xdp)
{
	NL_SET_ERR_MSG_MOD(xdp->extack,
			   "Please provide working DDP firmware package in order to use XDP\n"
			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
	return -EOPNOTSUPP;
}

/**
 * ice_xdp - implements XDP handler
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;

	if (vsi->type != ICE_VSI_PF) {
		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
		return -EINVAL;
	}

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
	case XDP_SETUP_XSK_POOL:
		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
					  xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* Disable anti-spoof detection interrupt to prevent spurious event
	 * interrupts during a function reset. Anti-spoof functionality is
	 * still supported.
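	 * The register is updated read-modify-write below so that only the
	 * anti-spoof ITR-disable bit changes and all other check bits keep
	 * their current values.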
	 */
	val = rd32(hw, GL_MDCK_TX_TDPU);
	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
	wr32(hw, GL_MDCK_TX_TDPU, val);

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_PUSH_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	struct device *dev;
	u32 oicr, ena_mask;

	dev = ice_pf_to_dev(pf);
	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		/* disable any further VFLR event notifications */
		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
			u32 reg = rd32(hw, PFINT_OICR_ENA);

			reg &= ~PFINT_OICR_VFLR_M;
			wr32(hw, PFINT_OICR_ENA, reg);
		} else {
			ena_mask &= ~PFINT_OICR_VFLR_M;
			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
		}
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(dev, "Invalid reset type %d\n", reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 */
		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(ICE_GLOBR_RECV, pf->state);
			else
				set_bit(ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_TSYN_TX_M) {
		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
		if (!hw->reset_ongoing)
			ret = IRQ_WAKE_THREAD;
	}

	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));

		/* Save EVENTs from GLTSYN register */
		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
						     GLTSYN_STAT_EVENT1_M |
						     GLTSYN_STAT_EVENT2_M);
		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
	}

#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
	if (oicr & ICE_AUX_CRIT_ERR) {
		pf->oicr_err_reg |= oicr;
		set_bit(ICE_AUX_ERR_PENDING, pf->state);
		ena_mask &= ~ICE_AUX_CRIT_ERR;
	}

	/* Report any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
	}
	if (!ret)
		ret = IRQ_HANDLED;

	ice_service_task_schedule(pf);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return ret;
}

/**
 * ice_misc_intr_thread_fn - misc interrupt thread function
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */
static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
{
	struct ice_pf *pf = data;

	if (ice_is_reset_in_progress(pf->state))
		return IRQ_HANDLED;

	while (!ice_ptp_process_ts(pf))
		usleep_range(50, 100);

	return IRQ_HANDLED;
}

/**
 * ice_dis_ctrlq_interrupts - disable control queue interrupts
 * @hw: pointer to HW structure
 */
static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
{
	/* disable Admin queue Interrupt causes */
	wr32(hw, PFINT_FW_CTL,
	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);

	/* disable Mailbox queue Interrupt causes */
	wr32(hw, PFINT_MBX_CTL,
	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);

	/* disable Sideband queue Interrupt causes */
	wr32(hw, PFINT_SB_CTL,
	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);

	/* disable Other Interrupt Cause (OICR) register causes */
	wr32(hw, PFINT_OICR_CTL,
	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);

	ice_flush(hw);
}

/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	ice_dis_ctrlq_interrupts(hw);

	/* disable OICR interrupt */
	wr32(hw, PFINT_OICR_ENA, 0);
	ice_flush(hw);

	if (pf->msix_entries) {
		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
		devm_free_irq(ice_pf_to_dev(pf),
			      pf->msix_entries[pf->oicr_idx].vector, pf);
	}

	pf->num_avail_sw_msix += 1;
	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}

/**
 * ice_ena_ctrlq_interrupts - enable control queue interrupts
 * @hw: pointer to HW structure
 * @reg_idx: HW vector index to associate the control queue interrupts with
 */
static
void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) 3216 { 3217 u32 val; 3218 3219 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 3220 PFINT_OICR_CTL_CAUSE_ENA_M); 3221 wr32(hw, PFINT_OICR_CTL, val); 3222 3223 /* enable Admin queue Interrupt causes */ 3224 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 3225 PFINT_FW_CTL_CAUSE_ENA_M); 3226 wr32(hw, PFINT_FW_CTL, val); 3227 3228 /* enable Mailbox queue Interrupt causes */ 3229 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 3230 PFINT_MBX_CTL_CAUSE_ENA_M); 3231 wr32(hw, PFINT_MBX_CTL, val); 3232 3233 /* This enables Sideband queue Interrupt causes */ 3234 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 3235 PFINT_SB_CTL_CAUSE_ENA_M); 3236 wr32(hw, PFINT_SB_CTL, val); 3237 3238 ice_flush(hw); 3239 } 3240 3241 /** 3242 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 3243 * @pf: board private structure 3244 * 3245 * This sets up the handler for MSIX 0, which is used to manage the 3246 * non-queue interrupts, e.g. AdminQ and errors. This is not used 3247 * when in MSI or Legacy interrupt mode. 3248 */ 3249 static int ice_req_irq_msix_misc(struct ice_pf *pf) 3250 { 3251 struct device *dev = ice_pf_to_dev(pf); 3252 struct ice_hw *hw = &pf->hw; 3253 int oicr_idx, err = 0; 3254 3255 if (!pf->int_name[0]) 3256 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 3257 dev_driver_string(dev), dev_name(dev)); 3258 3259 /* Do not request IRQ but do enable OICR interrupt since settings are 3260 * lost during reset. Note that this function is called only during 3261 * rebuild path and not while reset is in progress. 3262 */ 3263 if (ice_is_reset_in_progress(pf->state)) 3264 goto skip_req_irq; 3265 3266 /* reserve one vector in irq_tracker for misc interrupts */ 3267 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 3268 if (oicr_idx < 0) 3269 return oicr_idx; 3270 3271 pf->num_avail_sw_msix -= 1; 3272 pf->oicr_idx = (u16)oicr_idx; 3273 3274 err = devm_request_threaded_irq(dev, 3275 pf->msix_entries[pf->oicr_idx].vector, 3276 ice_misc_intr, ice_misc_intr_thread_fn, 3277 0, pf->int_name, pf); 3278 if (err) { 3279 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", 3280 pf->int_name, err); 3281 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 3282 pf->num_avail_sw_msix += 1; 3283 return err; 3284 } 3285 3286 skip_req_irq: 3287 ice_ena_misc_vector(pf); 3288 3289 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); 3290 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), 3291 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 3292 3293 ice_flush(hw); 3294 ice_irq_dynamic_ena(hw, NULL, NULL); 3295 3296 return 0; 3297 } 3298 3299 /** 3300 * ice_napi_add - register NAPI handler for the VSI 3301 * @vsi: VSI for which NAPI handler is to be registered 3302 * 3303 * This function is only called in the driver's load path. Registering the NAPI 3304 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 3305 * reset/rebuild, etc.) 
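 *
 * Calling it for vectors that have already registered their NAPI contexts
 * would double-register them, hence the load-path-only rule.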
 */
static void ice_napi_add(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll);
}

/**
 * ice_set_ops - set netdev and ethtool ops for the given netdev
 * @netdev: netdev instance
 */
static void ice_set_ops(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_is_safe_mode(pf)) {
		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
		ice_set_ethtool_safe_mode_ops(netdev);
		return;
	}

	netdev->netdev_ops = &ice_netdev_ops;
	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
	ice_set_ethtool_ops(netdev);
}

/**
 * ice_set_netdev_features - set features for the given netdev
 * @netdev: netdev instance
 */
static void ice_set_netdev_features(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;

	if (ice_is_safe_mode(pf)) {
		/* safe mode */
		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
		netdev->hw_features = netdev->features;
		return;
	}

	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA |
			NETIF_F_NTUPLE |
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM |
			 NETIF_F_IP_CSUM |
			 NETIF_F_SCTP_CRC |
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;

	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
	if (is_dvm_ena)
		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;

	tso_features = NETIF_F_TSO |
		       NETIF_F_TSO_ECN |
		       NETIF_F_TSO6 |
		       NETIF_F_GSO_GRE |
		       NETIF_F_GSO_UDP_TUNNEL |
		       NETIF_F_GSO_GRE_CSUM |
		       NETIF_F_GSO_UDP_TUNNEL_CSUM |
		       NETIF_F_GSO_PARTIAL |
		       NETIF_F_GSO_IPXIP4 |
		       NETIF_F_GSO_IPXIP6 |
		       NETIF_F_GSO_UDP_L4;

	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_GRE_CSUM;
	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* add support for HW_CSUM on packets with MPLS header */
	netdev->mpls_features = NETIF_F_HW_CSUM |
				NETIF_F_TSO |
				NETIF_F_TSO6;

	/* enable features */
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_HW_TC;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	/* advertise support but don't enable by default since only one type of
	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
	 * type turns on the other has to be turned off. This is enforced by
	 * the ice_fix_features() ndo callback.
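	 *
	 * Conceptually the rule amounts to the following (simplified
	 * sketch, not the actual ice_fix_features() body):
	 *
	 *	if (features & NETIF_F_HW_VLAN_STAG_RX)
	 *		features &= ~NETIF_F_HW_VLAN_CTAG_RX;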
	 */
	if (is_dvm_ena)
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
				       NETIF_F_HW_VLAN_STAG_TX;

	/* Leave CRC / FCS stripping enabled by default, but allow the value to
	 * be changed at runtime
	 */
	netdev->hw_features |= NETIF_F_RXFCS;
}

/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];

	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
				    vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	ice_set_netdev_features(netdev);

	ice_set_ops(netdev);

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
		eth_hw_addr_set(netdev, mac_addr);
		ether_addr_copy(netdev->perm_addr, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Setup netdev TC information */
	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);

	/* setup watchdog timeout value to be 5 seconds */
	netdev->watchdog_timeo = 5 * HZ;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = ICE_MAX_MTU;

	return 0;
}

/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
}

static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
		   struct ice_channel *ch)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
}

/**
 * ice_ctrl_vsi_setup - Set up a control VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
}

/**
 * ice_lb_vsi_setup - Set up a loopback VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
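 *
 * A typical call mirrors the other VSI setup helpers, e.g.:
 *
 *	vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);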
 */
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
}

/**
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID
 * @vid: VLAN ID to be added
 *
 * net_device_ops implementation for adding VLAN IDs
 */
static int
ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vsi *vsi = np->vsi;
	struct ice_vlan vlan;
	int ret;

	/* VLAN 0 is added by default during load/reset */
	if (!vid)
		return 0;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	/* Add multicast promisc rule for the VLAN ID to be added if
	 * all-multicast is currently enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					       ICE_MCAST_VLAN_PROMISC_BITS,
					       vid);
		if (ret)
			goto finish;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
	 * packets aren't pruned by the device's internal switch on Rx
	 */
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
	ret = vlan_ops->add_vlan(vsi, &vlan);
	if (ret)
		goto finish;

	/* If all-multicast is currently enabled and this VLAN ID is the only
	 * one besides VLAN-0, we have to update the look-up type of the
	 * multicast promisc rule for VLAN-0 from ICE_SW_LKUP_PROMISC to
	 * ICE_SW_LKUP_PROMISC_VLAN.
	 */
	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_PROMISC_BITS, 0);
		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return ret;
}

/**
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID
 * @vid: VLAN ID to be removed
 *
 * net_device_ops implementation for removing VLAN IDs
 */
static int
ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vsi *vsi = np->vsi;
	struct ice_vlan vlan;
	int ret;

	/* don't allow removal of VLAN 0 */
	if (!vid)
		return 0;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
	if (ret) {
		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
			   vsi->vsi_num);
		vsi->current_netdev_flags |= IFF_ALLMULTI;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Make sure VLAN delete is successful before updating VLAN
	 * information
	 */
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
	ret = vlan_ops->del_vlan(vsi, &vlan);
	if (ret)
		goto finish;

	/* Remove multicast promisc rule for the removed VLAN ID if
	 * all-multicast is enabled.
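	 * This mirrors the promisc rule installed in ice_vlan_rx_add_vid()
	 * above.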
3643 */ 3644 if (vsi->current_netdev_flags & IFF_ALLMULTI) 3645 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3646 ICE_MCAST_VLAN_PROMISC_BITS, vid); 3647 3648 if (!ice_vsi_has_non_zero_vlans(vsi)) { 3649 /* Update look-up type of multicast promisc rule for VLAN 0 3650 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when 3651 * all-multicast is enabled and VLAN 0 is the only VLAN rule. 3652 */ 3653 if (vsi->current_netdev_flags & IFF_ALLMULTI) { 3654 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3655 ICE_MCAST_VLAN_PROMISC_BITS, 3656 0); 3657 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, 3658 ICE_MCAST_PROMISC_BITS, 0); 3659 } 3660 } 3661 3662 finish: 3663 clear_bit(ICE_CFG_BUSY, vsi->state); 3664 3665 return ret; 3666 } 3667 3668 /** 3669 * ice_rep_indr_tc_block_unbind 3670 * @cb_priv: indirection block private data 3671 */ 3672 static void ice_rep_indr_tc_block_unbind(void *cb_priv) 3673 { 3674 struct ice_indr_block_priv *indr_priv = cb_priv; 3675 3676 list_del(&indr_priv->list); 3677 kfree(indr_priv); 3678 } 3679 3680 /** 3681 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications 3682 * @vsi: VSI struct which has the netdev 3683 */ 3684 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) 3685 { 3686 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); 3687 3688 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, 3689 ice_rep_indr_tc_block_unbind); 3690 } 3691 3692 /** 3693 * ice_tc_indir_block_remove - clean indirect TC block notifications 3694 * @pf: PF structure 3695 */ 3696 static void ice_tc_indir_block_remove(struct ice_pf *pf) 3697 { 3698 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); 3699 3700 if (!pf_vsi) 3701 return; 3702 3703 ice_tc_indir_block_unregister(pf_vsi); 3704 } 3705 3706 /** 3707 * ice_tc_indir_block_register - Register TC indirect block notifications 3708 * @vsi: VSI struct which has the netdev 3709 * 3710 * Returns 0 on success, negative value on failure 3711 */ 3712 static int ice_tc_indir_block_register(struct ice_vsi *vsi) 3713 { 3714 struct ice_netdev_priv *np; 3715 3716 if (!vsi || !vsi->netdev) 3717 return -EINVAL; 3718 3719 np = netdev_priv(vsi->netdev); 3720 3721 INIT_LIST_HEAD(&np->tc_indr_block_priv_list); 3722 return flow_indr_dev_register(ice_indr_setup_tc_cb, np); 3723 } 3724 3725 /** 3726 * ice_setup_pf_sw - Setup the HW switch on startup or after reset 3727 * @pf: board private structure 3728 * 3729 * Returns 0 on success, negative value on failure 3730 */ 3731 static int ice_setup_pf_sw(struct ice_pf *pf) 3732 { 3733 struct device *dev = ice_pf_to_dev(pf); 3734 bool dvm = ice_is_dvm_ena(&pf->hw); 3735 struct ice_vsi *vsi; 3736 int status; 3737 3738 if (ice_is_reset_in_progress(pf->state)) 3739 return -EBUSY; 3740 3741 status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 3742 if (status) 3743 return -EIO; 3744 3745 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 3746 if (!vsi) 3747 return -ENOMEM; 3748 3749 /* init channel list */ 3750 INIT_LIST_HEAD(&vsi->ch_list); 3751 3752 status = ice_cfg_netdev(vsi); 3753 if (status) 3754 goto unroll_vsi_setup; 3755 /* netdev has to be configured before setting frame size */ 3756 ice_vsi_cfg_frame_size(vsi); 3757 3758 /* init indirect block notifications */ 3759 status = ice_tc_indir_block_register(vsi); 3760 if (status) { 3761 dev_err(dev, "Failed to register netdev notifier\n"); 3762 goto unroll_cfg_netdev; 3763 } 3764 3765 /* Setup DCB netlink interface */ 3766 ice_dcbnl_setup(vsi); 3767 3768 /* registering the NAPI handler requires both the 
queues and 3769 * netdev to be created, which are done in ice_pf_vsi_setup() 3770 * and ice_cfg_netdev() respectively 3771 */ 3772 ice_napi_add(vsi); 3773 3774 status = ice_init_mac_fltr(pf); 3775 if (status) 3776 goto unroll_napi_add; 3777 3778 return 0; 3779 3780 unroll_napi_add: 3781 ice_tc_indir_block_unregister(vsi); 3782 unroll_cfg_netdev: 3783 ice_napi_del(vsi); 3784 if (vsi->netdev) { 3785 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3786 free_netdev(vsi->netdev); 3787 vsi->netdev = NULL; 3788 } 3789 3790 unroll_vsi_setup: 3791 ice_vsi_release(vsi); 3792 return status; 3793 } 3794 3795 /** 3796 * ice_get_avail_q_count - Get count of available queues 3797 * @pf_qmap: bitmap to get the available queue count from 3798 * @lock: pointer to a mutex that protects access to pf_qmap 3799 * @size: size of the bitmap 3800 */ 3801 static u16 3802 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 3803 { 3804 unsigned long bit; 3805 u16 count = 0; 3806 3807 mutex_lock(lock); 3808 for_each_clear_bit(bit, pf_qmap, size) 3809 count++; 3810 mutex_unlock(lock); 3811 3812 return count; 3813 } 3814 3815 /** 3816 * ice_get_avail_txq_count - Get count of available Tx queues 3817 * @pf: pointer to an ice_pf instance 3818 */ 3819 u16 ice_get_avail_txq_count(struct ice_pf *pf) 3820 { 3821 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 3822 pf->max_pf_txqs); 3823 } 3824 3825 /** 3826 * ice_get_avail_rxq_count - Get count of available Rx queues 3827 * @pf: pointer to an ice_pf instance 3828 */ 3829 u16 ice_get_avail_rxq_count(struct ice_pf *pf) 3830 { 3831 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 3832 pf->max_pf_rxqs); 3833 } 3834 3835 /** 3836 * ice_deinit_pf - Unrolls initializations done by ice_init_pf 3837 * @pf: board private structure to de-initialize 3838 */ 3839 static void ice_deinit_pf(struct ice_pf *pf) 3840 { 3841 ice_service_task_stop(pf); 3842 mutex_destroy(&pf->adev_mutex); 3843 mutex_destroy(&pf->sw_mutex); 3844 mutex_destroy(&pf->tc_mutex); 3845 mutex_destroy(&pf->avail_q_mutex); 3846 mutex_destroy(&pf->vfs.table_lock); 3847 3848 if (pf->avail_txqs) { 3849 bitmap_free(pf->avail_txqs); 3850 pf->avail_txqs = NULL; 3851 } 3852 3853 if (pf->avail_rxqs) { 3854 bitmap_free(pf->avail_rxqs); 3855 pf->avail_rxqs = NULL; 3856 } 3857 3858 if (pf->ptp.clock) 3859 ptp_clock_unregister(pf->ptp.clock); 3860 } 3861 3862 /** 3863 * ice_set_pf_caps - set PF's capability flags 3864 * @pf: pointer to the PF instance 3865 */ 3866 static void ice_set_pf_caps(struct ice_pf *pf) 3867 { 3868 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 3869 3870 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3871 if (func_caps->common_cap.rdma) 3872 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3873 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3874 if (func_caps->common_cap.dcb) 3875 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3876 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3877 if (func_caps->common_cap.sr_iov_1_1) { 3878 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3879 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, 3880 ICE_MAX_SRIOV_VFS); 3881 } 3882 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 3883 if (func_caps->common_cap.rss_table_size) 3884 set_bit(ICE_FLAG_RSS_ENA, pf->flags); 3885 3886 clear_bit(ICE_FLAG_FD_ENA, pf->flags); 3887 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 3888 u16 unused; 3889 3890 /* ctrl_vsi_idx will be set to a valid value when flow director 3891 * is set up by ice_init_fdir 3892 */ 3893 pf->ctrl_vsi_idx = ICE_NO_VSI; 3894
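/* Mark Flow Director usable and pre-reserve its guaranteed and best-effort filter space from the device-wide pools (see the two ice_alloc_fd_*_item() calls below). */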
set_bit(ICE_FLAG_FD_ENA, pf->flags); 3895 /* force guaranteed filter pool for PF */ 3896 ice_alloc_fd_guar_item(&pf->hw, &unused, 3897 func_caps->fd_fltr_guar); 3898 /* force shared filter pool for PF */ 3899 ice_alloc_fd_shrd_item(&pf->hw, &unused, 3900 func_caps->fd_fltr_best_effort); 3901 } 3902 3903 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3904 if (func_caps->common_cap.ieee_1588) 3905 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3906 3907 pf->max_pf_txqs = func_caps->common_cap.num_txq; 3908 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 3909 } 3910 3911 /** 3912 * ice_init_pf - Initialize general software structures (struct ice_pf) 3913 * @pf: board private structure to initialize 3914 */ 3915 static int ice_init_pf(struct ice_pf *pf) 3916 { 3917 ice_set_pf_caps(pf); 3918 3919 mutex_init(&pf->sw_mutex); 3920 mutex_init(&pf->tc_mutex); 3921 mutex_init(&pf->adev_mutex); 3922 3923 INIT_HLIST_HEAD(&pf->aq_wait_list); 3924 spin_lock_init(&pf->aq_wait_lock); 3925 init_waitqueue_head(&pf->aq_wait_queue); 3926 3927 init_waitqueue_head(&pf->reset_wait_queue); 3928 3929 /* setup service timer and periodic service task */ 3930 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 3931 pf->serv_tmr_period = HZ; 3932 INIT_WORK(&pf->serv_task, ice_service_task); 3933 clear_bit(ICE_SERVICE_SCHED, pf->state); 3934 3935 mutex_init(&pf->avail_q_mutex); 3936 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 3937 if (!pf->avail_txqs) 3938 return -ENOMEM; 3939 3940 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 3941 if (!pf->avail_rxqs) { 3942 bitmap_free(pf->avail_txqs); 3943 pf->avail_txqs = NULL; 3944 return -ENOMEM; 3945 } 3946 3947 mutex_init(&pf->vfs.table_lock); 3948 hash_init(pf->vfs.table); 3949 3950 return 0; 3951 } 3952 3953 /** 3954 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors 3955 * @pf: board private structure 3956 * @v_remain: number of remaining MSI-X vectors to be distributed 3957 * 3958 * Reduce the usage of MSI-X vectors when the entire request cannot be 3959 * fulfilled. pf->num_lan_msix and pf->num_rdma_msix values are set based on 3960 * the number of remaining vectors. 3961 */ 3962 static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) 3963 { 3964 int v_rdma; 3965 3966 if (!ice_is_rdma_ena(pf)) { 3967 pf->num_lan_msix = v_remain; 3968 return; 3969 } 3970 3971 /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ 3972 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; 3973 3974 if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { 3975 dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); 3976 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3977 3978 pf->num_rdma_msix = 0; 3979 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; 3980 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || 3981 (v_remain - v_rdma < v_rdma)) { 3982 /* Support minimum RDMA and give remaining vectors to LAN MSIX */ 3983 pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; 3984 pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; 3985 } else { 3986 /* Split remaining MSIX with RDMA after accounting for AEQ MSIX 3987 */ 3988 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + 3989 ICE_RDMA_NUM_AEQ_MSIX; 3990 pf->num_lan_msix = v_remain - pf->num_rdma_msix; 3991 } 3992 } 3993 3994 /** 3995 * ice_ena_msix_range - Request a range of MSI-X vectors from the OS 3996 * @pf: board private structure 3997 * 3998 * Compute the number of MSI-X vectors wanted and request from the OS. Adjust 3999 * device usage if there are not enough vectors.
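* As a rough worked example derived from the code below (the exact totals depend on the ICE_*_MSIX macros in ice.h): with N online CPUs, Flow Director enabled and RDMA supported, v_wanted = ICE_MIN_LAN_OICR_MSIX + ICE_FDIR_MSIX + ICE_ESWITCH_MSIX + N for LAN traffic + N + ICE_RDMA_NUM_AEQ_MSIX for RDMA; ice_reduce_msix_usage() trims that request when the device exposes fewer vectors.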
Return the number of vectors 4000 * reserved or negative on failure. 4001 */ 4002 static int ice_ena_msix_range(struct ice_pf *pf) 4003 { 4004 int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; 4005 struct device *dev = ice_pf_to_dev(pf); 4006 int err, i; 4007 4008 hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; 4009 num_cpus = num_online_cpus(); 4010 4011 /* LAN miscellaneous handler */ 4012 v_other = ICE_MIN_LAN_OICR_MSIX; 4013 4014 /* Flow Director */ 4015 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) 4016 v_other += ICE_FDIR_MSIX; 4017 4018 /* switchdev */ 4019 v_other += ICE_ESWITCH_MSIX; 4020 4021 v_wanted = v_other; 4022 4023 /* LAN traffic */ 4024 pf->num_lan_msix = num_cpus; 4025 v_wanted += pf->num_lan_msix; 4026 4027 /* RDMA auxiliary driver */ 4028 if (ice_is_rdma_ena(pf)) { 4029 pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; 4030 v_wanted += pf->num_rdma_msix; 4031 } 4032 4033 if (v_wanted > hw_num_msix) { 4034 int v_remain; 4035 4036 dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n", 4037 v_wanted, hw_num_msix); 4038 4039 if (hw_num_msix < ICE_MIN_MSIX) { 4040 err = -ERANGE; 4041 goto exit_err; 4042 } 4043 4044 v_remain = hw_num_msix - v_other; 4045 if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { 4046 v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; 4047 v_remain = ICE_MIN_LAN_TXRX_MSIX; 4048 } 4049 4050 ice_reduce_msix_usage(pf, v_remain); 4051 v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; 4052 4053 dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", 4054 pf->num_lan_msix); 4055 if (ice_is_rdma_ena(pf)) 4056 dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", 4057 pf->num_rdma_msix); 4058 } 4059 4060 pf->msix_entries = devm_kcalloc(dev, v_wanted, 4061 sizeof(*pf->msix_entries), GFP_KERNEL); 4062 if (!pf->msix_entries) { 4063 err = -ENOMEM; 4064 goto exit_err; 4065 } 4066 4067 for (i = 0; i < v_wanted; i++) 4068 pf->msix_entries[i].entry = i; 4069 4070 /* actually reserve the vectors */ 4071 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, 4072 ICE_MIN_MSIX, v_wanted); 4073 if (v_actual < 0) { 4074 dev_err(dev, "unable to reserve MSI-X vectors\n"); 4075 err = v_actual; 4076 goto msix_err; 4077 } 4078 4079 if (v_actual < v_wanted) { 4080 dev_warn(dev, "not enough OS MSI-X vectors. 
requested = %d, obtained = %d\n", 4081 v_wanted, v_actual); 4082 4083 if (v_actual < ICE_MIN_MSIX) { 4084 /* error if we can't get minimum vectors */ 4085 pci_disable_msix(pf->pdev); 4086 err = -ERANGE; 4087 goto msix_err; 4088 } else { 4089 int v_remain = v_actual - v_other; 4090 4091 if (v_remain < ICE_MIN_LAN_TXRX_MSIX) 4092 v_remain = ICE_MIN_LAN_TXRX_MSIX; 4093 4094 ice_reduce_msix_usage(pf, v_remain); 4095 4096 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", 4097 pf->num_lan_msix); 4098 4099 if (ice_is_rdma_ena(pf)) 4100 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", 4101 pf->num_rdma_msix); 4102 } 4103 } 4104 4105 return v_actual; 4106 4107 msix_err: 4108 devm_kfree(dev, pf->msix_entries); 4109 4110 exit_err: 4111 pf->num_rdma_msix = 0; 4112 pf->num_lan_msix = 0; 4113 return err; 4114 } 4115 4116 /** 4117 * ice_dis_msix - Disable MSI-X interrupt setup in OS 4118 * @pf: board private structure 4119 */ 4120 static void ice_dis_msix(struct ice_pf *pf) 4121 { 4122 pci_disable_msix(pf->pdev); 4123 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); 4124 pf->msix_entries = NULL; 4125 } 4126 4127 /** 4128 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme 4129 * @pf: board private structure 4130 */ 4131 static void ice_clear_interrupt_scheme(struct ice_pf *pf) 4132 { 4133 ice_dis_msix(pf); 4134 4135 if (pf->irq_tracker) { 4136 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); 4137 pf->irq_tracker = NULL; 4138 } 4139 } 4140 4141 /** 4142 * ice_init_interrupt_scheme - Determine proper interrupt scheme 4143 * @pf: board private structure to initialize 4144 */ 4145 static int ice_init_interrupt_scheme(struct ice_pf *pf) 4146 { 4147 int vectors; 4148 4149 vectors = ice_ena_msix_range(pf); 4150 4151 if (vectors < 0) 4152 return vectors; 4153 4154 /* set up vector assignment tracking */ 4155 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), 4156 struct_size(pf->irq_tracker, list, vectors), 4157 GFP_KERNEL); 4158 if (!pf->irq_tracker) { 4159 ice_dis_msix(pf); 4160 return -ENOMEM; 4161 } 4162 4163 /* populate the SW interrupts pool with the number of OS-granted IRQs */ 4164 pf->num_avail_sw_msix = (u16)vectors; 4165 pf->irq_tracker->num_entries = (u16)vectors; 4166 pf->irq_tracker->end = pf->irq_tracker->num_entries; 4167 4168 return 0; 4169 } 4170 4171 /** 4172 * ice_is_wol_supported - check if WoL is supported 4173 * @hw: pointer to hardware info 4174 * 4175 * Check if WoL is supported based on the HW configuration. 4176 * Returns true if NVM supports and enables WoL for this port, false otherwise 4177 */ 4178 bool ice_is_wol_supported(struct ice_hw *hw) 4179 { 4180 u16 wol_ctrl; 4181 4182 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 4183 * word) indicates WoL is not supported on the corresponding PF ID. 4184 */ 4185 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 4186 return false; 4187 4188 return !(BIT(hw->port_info->lport) & wol_ctrl); 4189 } 4190 4191 /** 4192 * ice_vsi_recfg_qs - Change the number of queues on a VSI 4193 * @vsi: VSI being changed 4194 * @new_rx: new number of Rx queues 4195 * @new_tx: new number of Tx queues 4196 * 4197 * Only change the number of queues if new_tx or new_rx is non-0. 4198 * 4199 * Returns 0 on success.
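* For example, ice_vsi_recfg_qs(vsi, 8, 0) requests 8 Rx queues and leaves the Tx queue count unchanged. The change is applied immediately while the netdev is running; otherwise the VSI is rebuilt with the new counts, which become effective when the interface is next brought up.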
4200 */ 4201 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) 4202 { 4203 struct ice_pf *pf = vsi->back; 4204 int err = 0, timeout = 50; 4205 4206 if (!new_rx && !new_tx) 4207 return -EINVAL; 4208 4209 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 4210 timeout--; 4211 if (!timeout) 4212 return -EBUSY; 4213 usleep_range(1000, 2000); 4214 } 4215 4216 if (new_tx) 4217 vsi->req_txq = (u16)new_tx; 4218 if (new_rx) 4219 vsi->req_rxq = (u16)new_rx; 4220 4221 /* set for the next time the netdev is started */ 4222 if (!netif_running(vsi->netdev)) { 4223 ice_vsi_rebuild(vsi, false); 4224 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 4225 goto done; 4226 } 4227 4228 ice_vsi_close(vsi); 4229 ice_vsi_rebuild(vsi, false); 4230 ice_pf_dcb_recfg(pf); 4231 ice_vsi_open(vsi); 4232 done: 4233 clear_bit(ICE_CFG_BUSY, pf->state); 4234 return err; 4235 } 4236 4237 /** 4238 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode 4239 * @pf: PF to configure 4240 * 4241 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF 4242 * VSI can still Tx/Rx VLAN tagged packets. 4243 */ 4244 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 4245 { 4246 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4247 struct ice_vsi_ctx *ctxt; 4248 struct ice_hw *hw; 4249 int status; 4250 4251 if (!vsi) 4252 return; 4253 4254 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 4255 if (!ctxt) 4256 return; 4257 4258 hw = &pf->hw; 4259 ctxt->info = vsi->info; 4260 4261 ctxt->info.valid_sections = 4262 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 4263 ICE_AQ_VSI_PROP_SECURITY_VALID | 4264 ICE_AQ_VSI_PROP_SW_VALID); 4265 4266 /* disable VLAN anti-spoof */ 4267 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4268 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4269 4270 /* disable VLAN pruning and keep all other settings */ 4271 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4272 4273 /* allow all VLANs on Tx and don't strip on Rx */ 4274 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | 4275 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 4276 4277 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 4278 if (status) { 4279 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", 4280 status, ice_aq_str(hw->adminq.sq_last_status)); 4281 } else { 4282 vsi->info.sec_flags = ctxt->info.sec_flags; 4283 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 4284 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; 4285 } 4286 4287 kfree(ctxt); 4288 } 4289 4290 /** 4291 * ice_log_pkg_init - log result of DDP package load 4292 * @hw: pointer to hardware info 4293 * @state: state of package load 4294 */ 4295 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) 4296 { 4297 struct ice_pf *pf = hw->back; 4298 struct device *dev; 4299 4300 dev = ice_pf_to_dev(pf); 4301 4302 switch (state) { 4303 case ICE_DDP_PKG_SUCCESS: 4304 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 4305 hw->active_pkg_name, 4306 hw->active_pkg_ver.major, 4307 hw->active_pkg_ver.minor, 4308 hw->active_pkg_ver.update, 4309 hw->active_pkg_ver.draft); 4310 break; 4311 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: 4312 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 4313 hw->active_pkg_name, 4314 hw->active_pkg_ver.major, 4315 hw->active_pkg_ver.minor, 4316 hw->active_pkg_ver.update, 4317 hw->active_pkg_ver.draft); 4318 break; 4319 
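/* the package already active on the device is outside the driver's supported version range */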
case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: 4320 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 4321 hw->active_pkg_name, 4322 hw->active_pkg_ver.major, 4323 hw->active_pkg_ver.minor, 4324 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4325 break; 4326 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: 4327 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 4328 hw->active_pkg_name, 4329 hw->active_pkg_ver.major, 4330 hw->active_pkg_ver.minor, 4331 hw->active_pkg_ver.update, 4332 hw->active_pkg_ver.draft, 4333 hw->pkg_name, 4334 hw->pkg_ver.major, 4335 hw->pkg_ver.minor, 4336 hw->pkg_ver.update, 4337 hw->pkg_ver.draft); 4338 break; 4339 case ICE_DDP_PKG_FW_MISMATCH: 4340 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering Safe Mode.\n"); 4341 break; 4342 case ICE_DDP_PKG_INVALID_FILE: 4343 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); 4344 break; 4345 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: 4346 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 4347 break; 4348 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: 4349 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 4350 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4351 break; 4352 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: 4353 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); 4354 break; 4355 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: 4356 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 4357 break; 4358 case ICE_DDP_PKG_LOAD_ERROR: 4359 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 4360 /* poll for reset to complete */ 4361 if (ice_check_reset(hw)) 4362 dev_err(dev, "Error resetting device. Please reload the driver\n"); 4363 break; 4364 case ICE_DDP_PKG_ERR: 4365 default: 4366 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); 4367 break; 4368 } 4369 } 4370 4371 /** 4372 * ice_load_pkg - load/reload the DDP Package file 4373 * @firmware: firmware structure when firmware requested or NULL for reload 4374 * @pf: pointer to the PF instance 4375 * 4376 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 4377 * initialize HW tables.
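* The arguments encode the call site: probe passes the freshly requested firmware image while hw->pkg_copy is still NULL; the post-reset rebuild passes NULL for @firmware and reuses the copy cached in hw->pkg_copy.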
4378 */ 4379 static void 4380 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 4381 { 4382 enum ice_ddp_state state = ICE_DDP_PKG_ERR; 4383 struct device *dev = ice_pf_to_dev(pf); 4384 struct ice_hw *hw = &pf->hw; 4385 4386 /* Load DDP Package */ 4387 if (firmware && !hw->pkg_copy) { 4388 state = ice_copy_and_init_pkg(hw, firmware->data, 4389 firmware->size); 4390 ice_log_pkg_init(hw, state); 4391 } else if (!firmware && hw->pkg_copy) { 4392 /* Reload package during rebuild after CORER/GLOBR reset */ 4393 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 4394 ice_log_pkg_init(hw, state); 4395 } else { 4396 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); 4397 } 4398 4399 if (!ice_is_init_pkg_successful(state)) { 4400 /* Safe Mode */ 4401 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4402 return; 4403 } 4404 4405 /* A successful package download is the precondition for advanced 4406 * features, hence setting the ICE_FLAG_ADV_FEATURES flag 4407 */ 4408 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4409 } 4410 4411 /** 4412 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 4413 * @pf: pointer to the PF structure 4414 * 4415 * There is no error returned here because the driver should be able to handle 4416 * 128 Byte cache lines, so we only print a warning in case issues are seen, 4417 * specifically with Tx. 4418 */ 4419 static void ice_verify_cacheline_size(struct ice_pf *pf) 4420 { 4421 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 4422 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 4423 ICE_CACHE_LINE_BYTES); 4424 } 4425 4426 /** 4427 * ice_send_version - update firmware with driver version 4428 * @pf: PF struct 4429 * 4430 * Returns 0 on success, else error code 4431 */ 4432 static int ice_send_version(struct ice_pf *pf) 4433 { 4434 struct ice_driver_ver dv; 4435 4436 dv.major_ver = 0xff; 4437 dv.minor_ver = 0xff; 4438 dv.build_ver = 0xff; 4439 dv.subbuild_ver = 0; 4440 strscpy((char *)dv.driver_string, UTS_RELEASE, 4441 sizeof(dv.driver_string)); 4442 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 4443 } 4444 4445 /** 4446 * ice_init_fdir - Initialize flow director VSI and configuration 4447 * @pf: pointer to the PF instance 4448 * 4449 * Returns 0 on success, negative on error 4450 */ 4451 static int ice_init_fdir(struct ice_pf *pf) 4452 { 4453 struct device *dev = ice_pf_to_dev(pf); 4454 struct ice_vsi *ctrl_vsi; 4455 int err; 4456 4457 /* Side Band Flow Director needs to have a control VSI. 4458 * Allocate it and store it in the PF.
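* On failure, the control VSI is released and pf->ctrl_vsi_idx is reset to ICE_NO_VSI so that later teardown paths skip it.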
4459 */ 4460 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); 4461 if (!ctrl_vsi) { 4462 dev_dbg(dev, "could not create control VSI\n"); 4463 return -ENOMEM; 4464 } 4465 4466 err = ice_vsi_open_ctrl(ctrl_vsi); 4467 if (err) { 4468 dev_dbg(dev, "could not open control VSI\n"); 4469 goto err_vsi_open; 4470 } 4471 4472 mutex_init(&pf->hw.fdir_fltr_lock); 4473 4474 err = ice_fdir_create_dflt_rules(pf); 4475 if (err) 4476 goto err_fdir_rule; 4477 4478 return 0; 4479 4480 err_fdir_rule: 4481 ice_fdir_release_flows(&pf->hw); 4482 ice_vsi_close(ctrl_vsi); 4483 err_vsi_open: 4484 ice_vsi_release(ctrl_vsi); 4485 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 4486 pf->vsi[pf->ctrl_vsi_idx] = NULL; 4487 pf->ctrl_vsi_idx = ICE_NO_VSI; 4488 } 4489 return err; 4490 } 4491 4492 /** 4493 * ice_get_opt_fw_name - return optional firmware file name or NULL 4494 * @pf: pointer to the PF instance 4495 */ 4496 static char *ice_get_opt_fw_name(struct ice_pf *pf) 4497 { 4498 /* The optional firmware name is the same as the default, with an 4499 * additional dash followed by an EUI-64 identifier (PCIe Device Serial Number) 4500 */ 4501 struct pci_dev *pdev = pf->pdev; 4502 char *opt_fw_filename; 4503 u64 dsn; 4504 4505 /* Determine the name of the optional file using the DSN (two 4506 * dwords following the start of the DSN Capability). 4507 */ 4508 dsn = pci_get_dsn(pdev); 4509 if (!dsn) 4510 return NULL; 4511 4512 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); 4513 if (!opt_fw_filename) 4514 return NULL; 4515 4516 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", 4517 ICE_DDP_PKG_PATH, dsn); 4518 4519 return opt_fw_filename; 4520 } 4521 4522 /** 4523 * ice_request_fw - request and load the DDP package file 4524 * @pf: pointer to the PF instance 4525 */ 4526 static void ice_request_fw(struct ice_pf *pf) 4527 { 4528 char *opt_fw_filename = ice_get_opt_fw_name(pf); 4529 const struct firmware *firmware = NULL; 4530 struct device *dev = ice_pf_to_dev(pf); 4531 int err = 0; 4532 4533 /* An optional device-specific DDP (if present) overrides the default DDP 4534 * package file. The kernel logs a debug message if the file doesn't exist, 4535 * and warning messages for other errors. 4536 */ 4537 if (opt_fw_filename) { 4538 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); 4539 if (err) { 4540 kfree(opt_fw_filename); 4541 goto dflt_pkg_load; 4542 } 4543 4544 /* request for firmware was successful. Download to device */ 4545 ice_load_pkg(firmware, pf); 4546 kfree(opt_fw_filename); 4547 release_firmware(firmware); 4548 return; 4549 } 4550 4551 dflt_pkg_load: 4552 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); 4553 if (err) { 4554 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); 4555 return; 4556 } 4557 4558 /* request for firmware was successful.
Download to device */ 4559 ice_load_pkg(firmware, pf); 4560 release_firmware(firmware); 4561 } 4562 4563 /** 4564 * ice_print_wake_reason - show the wake up cause in the log 4565 * @pf: pointer to the PF struct 4566 */ 4567 static void ice_print_wake_reason(struct ice_pf *pf) 4568 { 4569 u32 wus = pf->wakeup_reason; 4570 const char *wake_str; 4571 4572 /* if no wake event, nothing to print */ 4573 if (!wus) 4574 return; 4575 4576 if (wus & PFPM_WUS_LNKC_M) 4577 wake_str = "Link\n"; 4578 else if (wus & PFPM_WUS_MAG_M) 4579 wake_str = "Magic Packet\n"; 4580 else if (wus & PFPM_WUS_MNG_M) 4581 wake_str = "Management\n"; 4582 else if (wus & PFPM_WUS_FW_RST_WK_M) 4583 wake_str = "Firmware Reset\n"; 4584 else 4585 wake_str = "Unknown\n"; 4586 4587 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 4588 } 4589 4590 /** 4591 * ice_register_netdev - register netdev 4592 * @pf: pointer to the PF struct 4593 */ 4594 static int ice_register_netdev(struct ice_pf *pf) 4595 { 4596 struct ice_vsi *vsi; 4597 int err = 0; 4598 4599 vsi = ice_get_main_vsi(pf); 4600 if (!vsi || !vsi->netdev) 4601 return -EIO; 4602 4603 err = register_netdev(vsi->netdev); 4604 if (err) 4605 goto err_register_netdev; 4606 4607 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4608 netif_carrier_off(vsi->netdev); 4609 netif_tx_stop_all_queues(vsi->netdev); 4610 4611 return 0; 4612 err_register_netdev: 4613 free_netdev(vsi->netdev); 4614 vsi->netdev = NULL; 4615 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4616 return err; 4617 } 4618 4619 /** 4620 * ice_probe - Device initialization routine 4621 * @pdev: PCI device information struct 4622 * @ent: entry in ice_pci_tbl 4623 * 4624 * Returns 0 on success, negative on failure 4625 */ 4626 static int 4627 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 4628 { 4629 struct device *dev = &pdev->dev; 4630 struct ice_vsi *vsi; 4631 struct ice_pf *pf; 4632 struct ice_hw *hw; 4633 int i, err; 4634 4635 if (pdev->is_virtfn) { 4636 dev_err(dev, "can't probe a virtual function\n"); 4637 return -EINVAL; 4638 } 4639 4640 /* this driver uses devres, see 4641 * Documentation/driver-api/driver-model/devres.rst 4642 */ 4643 err = pcim_enable_device(pdev); 4644 if (err) 4645 return err; 4646 4647 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 4648 if (err) { 4649 dev_err(dev, "BAR0 I/O map error %d\n", err); 4650 return err; 4651 } 4652 4653 pf = ice_allocate_pf(dev); 4654 if (!pf) 4655 return -ENOMEM; 4656 4657 /* initialize Auxiliary index to invalid value */ 4658 pf->aux_idx = -1; 4659 4660 /* set up for high or low DMA */ 4661 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4662 if (err) { 4663 dev_err(dev, "DMA configuration failed: 0x%x\n", err); 4664 return err; 4665 } 4666 4667 pci_enable_pcie_error_reporting(pdev); 4668 pci_set_master(pdev); 4669 4670 pf->pdev = pdev; 4671 pci_set_drvdata(pdev, pf); 4672 set_bit(ICE_DOWN, pf->state); 4673 /* Disable service task until DOWN bit is cleared */ 4674 set_bit(ICE_SERVICE_DIS, pf->state); 4675 4676 hw = &pf->hw; 4677 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 4678 pci_save_state(pdev); 4679 4680 hw->back = pf; 4681 hw->vendor_id = pdev->vendor; 4682 hw->device_id = pdev->device; 4683 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 4684 hw->subsystem_vendor_id = pdev->subsystem_vendor; 4685 hw->subsystem_device_id = pdev->subsystem_device; 4686 hw->bus.device = PCI_SLOT(pdev->devfn); 4687 hw->bus.func = PCI_FUNC(pdev->devfn); 4688 ice_set_ctrlq_len(hw); 4689 
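/* derive the netif message level from the "debug" module parameter; when dynamic debug is not available, values below -1 also seed the HW debug mask */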
4690 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 4691 4692 #ifndef CONFIG_DYNAMIC_DEBUG 4693 if (debug < -1) 4694 hw->debug_mask = debug; 4695 #endif 4696 4697 err = ice_init_hw(hw); 4698 if (err) { 4699 dev_err(dev, "ice_init_hw failed: %d\n", err); 4700 err = -EIO; 4701 goto err_exit_unroll; 4702 } 4703 4704 ice_init_feature_support(pf); 4705 4706 ice_request_fw(pf); 4707 4708 /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be 4709 * set in pf->flags, which will cause ice_is_safe_mode to return 4710 * true 4711 */ 4712 if (ice_is_safe_mode(pf)) { 4713 /* we already got function/device capabilities but these don't 4714 * reflect what the driver needs to do in safe mode. Instead of 4715 * adding conditional logic everywhere to ignore these 4716 * device/function capabilities, override them. 4717 */ 4718 ice_set_safe_mode_caps(hw); 4719 } 4720 4721 err = ice_init_pf(pf); 4722 if (err) { 4723 dev_err(dev, "ice_init_pf failed: %d\n", err); 4724 goto err_init_pf_unroll; 4725 } 4726 4727 ice_devlink_init_regions(pf); 4728 4729 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; 4730 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; 4731 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; 4732 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; 4733 i = 0; 4734 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 4735 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4736 pf->hw.tnl.valid_count[TNL_VXLAN]; 4737 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4738 UDP_TUNNEL_TYPE_VXLAN; 4739 i++; 4740 } 4741 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { 4742 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4743 pf->hw.tnl.valid_count[TNL_GENEVE]; 4744 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4745 UDP_TUNNEL_TYPE_GENEVE; 4746 i++; 4747 } 4748 4749 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 4750 if (!pf->num_alloc_vsi) { 4751 err = -EIO; 4752 goto err_init_pf_unroll; 4753 } 4754 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 4755 dev_warn(&pf->pdev->dev, 4756 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 4757 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 4758 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 4759 } 4760 4761 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 4762 GFP_KERNEL); 4763 if (!pf->vsi) { 4764 err = -ENOMEM; 4765 goto err_init_pf_unroll; 4766 } 4767 4768 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, 4769 sizeof(*pf->vsi_stats), GFP_KERNEL); 4770 if (!pf->vsi_stats) { 4771 err = -ENOMEM; 4772 goto err_init_vsi_unroll; 4773 } 4774 4775 err = ice_init_interrupt_scheme(pf); 4776 if (err) { 4777 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 4778 err = -EIO; 4779 goto err_init_vsi_stats_unroll; 4780 } 4781 4782 /* In case of MSIX we are going to set up the misc vector right here 4783 * to handle admin queue events etc. In case of legacy and MSI 4784 * the misc functionality and queue processing is combined in 4785 * the same vector and that gets set up at open.
4786 */ 4787 err = ice_req_irq_msix_misc(pf); 4788 if (err) { 4789 dev_err(dev, "setup of misc vector failed: %d\n", err); 4790 goto err_init_interrupt_unroll; 4791 } 4792 4793 /* create switch struct for the switch element created by FW on boot */ 4794 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); 4795 if (!pf->first_sw) { 4796 err = -ENOMEM; 4797 goto err_msix_misc_unroll; 4798 } 4799 4800 if (hw->evb_veb) 4801 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4802 else 4803 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4804 4805 pf->first_sw->pf = pf; 4806 4807 /* record the sw_id available for later use */ 4808 pf->first_sw->sw_id = hw->port_info->sw_id; 4809 4810 err = ice_setup_pf_sw(pf); 4811 if (err) { 4812 dev_err(dev, "probe failed due to setup PF switch: %d\n", err); 4813 goto err_alloc_sw_unroll; 4814 } 4815 4816 clear_bit(ICE_SERVICE_DIS, pf->state); 4817 4818 /* tell the firmware we are up */ 4819 err = ice_send_version(pf); 4820 if (err) { 4821 dev_err(dev, "probe failed sending driver version %s. error: %d\n", 4822 UTS_RELEASE, err); 4823 goto err_send_version_unroll; 4824 } 4825 4826 /* since everything is good, start the service timer */ 4827 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4828 4829 err = ice_init_link_events(pf->hw.port_info); 4830 if (err) { 4831 dev_err(dev, "ice_init_link_events failed: %d\n", err); 4832 goto err_send_version_unroll; 4833 } 4834 4835 /* not a fatal error if this fails */ 4836 err = ice_init_nvm_phy_type(pf->hw.port_info); 4837 if (err) 4838 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 4839 4840 /* not a fatal error if this fails */ 4841 err = ice_update_link_info(pf->hw.port_info); 4842 if (err) 4843 dev_err(dev, "ice_update_link_info failed: %d\n", err); 4844 4845 ice_init_link_dflt_override(pf->hw.port_info); 4846 4847 ice_check_link_cfg_err(pf, 4848 pf->hw.port_info->phy.link_info.link_cfg_err); 4849 4850 /* if media available, initialize PHY settings */ 4851 if (pf->hw.port_info->phy.link_info.link_info & 4852 ICE_AQ_MEDIA_AVAILABLE) { 4853 /* not a fatal error if this fails */ 4854 err = ice_init_phy_user_cfg(pf->hw.port_info); 4855 if (err) 4856 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 4857 4858 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 4859 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4860 4861 if (vsi) 4862 ice_configure_phy(vsi); 4863 } 4864 } else { 4865 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 4866 } 4867 4868 ice_verify_cacheline_size(pf); 4869 4870 /* Save wakeup reason register for later use */ 4871 pf->wakeup_reason = rd32(hw, PFPM_WUS); 4872 4873 /* check for a power management event */ 4874 ice_print_wake_reason(pf); 4875 4876 /* clear wake status, all bits */ 4877 wr32(hw, PFPM_WUS, U32_MAX); 4878 4879 /* Disable WoL at init, wait for user to enable */ 4880 device_set_wakeup_enable(dev, false); 4881 4882 if (ice_is_safe_mode(pf)) { 4883 ice_set_safe_mode_vlan_cfg(pf); 4884 goto probe_done; 4885 } 4886 4887 /* initialize DDP driven features */ 4888 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4889 ice_ptp_init(pf); 4890 4891 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 4892 ice_gnss_init(pf); 4893 4894 /* Note: Flow director init failure is non-fatal to load */ 4895 if (ice_init_fdir(pf)) 4896 dev_err(dev, "could not initialize flow director\n"); 4897 4898 /* Note: DCB init failure is non-fatal to load */ 4899 if (ice_init_pf_dcb(pf, false)) { 4900 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4901 clear_bit(ICE_FLAG_DCB_ENA, 
pf->flags); 4902 } else { 4903 ice_cfg_lldp_mib_change(&pf->hw, true); 4904 } 4905 4906 if (ice_init_lag(pf)) 4907 dev_warn(dev, "Failed to init link aggregation support\n"); 4908 4909 /* print PCI link speed and width */ 4910 pcie_print_link_status(pf->pdev); 4911 4912 probe_done: 4913 err = ice_devlink_create_pf_port(pf); 4914 if (err) 4915 goto err_create_pf_port; 4916 4917 vsi = ice_get_main_vsi(pf); 4918 if (!vsi || !vsi->netdev) { 4919 err = -EINVAL; 4920 goto err_netdev_reg; 4921 } 4922 4923 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); 4924 4925 err = ice_register_netdev(pf); 4926 if (err) 4927 goto err_netdev_reg; 4928 4929 err = ice_devlink_register_params(pf); 4930 if (err) 4931 goto err_netdev_reg; 4932 4933 /* ready to go, so clear down state bit */ 4934 clear_bit(ICE_DOWN, pf->state); 4935 if (ice_is_rdma_ena(pf)) { 4936 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); 4937 if (pf->aux_idx < 0) { 4938 dev_err(dev, "Failed to allocate device ID for AUX driver\n"); 4939 err = -ENOMEM; 4940 goto err_devlink_reg_param; 4941 } 4942 4943 err = ice_init_rdma(pf); 4944 if (err) { 4945 dev_err(dev, "Failed to initialize RDMA: %d\n", err); 4946 err = -EIO; 4947 goto err_init_aux_unroll; 4948 } 4949 } else { 4950 dev_warn(dev, "RDMA is not supported on this device\n"); 4951 } 4952 4953 ice_devlink_register(pf); 4954 return 0; 4955 4956 err_init_aux_unroll: 4957 pf->adev = NULL; 4958 ida_free(&ice_aux_ida, pf->aux_idx); 4959 err_devlink_reg_param: 4960 ice_devlink_unregister_params(pf); 4961 err_netdev_reg: 4962 ice_devlink_destroy_pf_port(pf); 4963 err_create_pf_port: 4964 err_send_version_unroll: 4965 ice_vsi_release_all(pf); 4966 err_alloc_sw_unroll: 4967 set_bit(ICE_SERVICE_DIS, pf->state); 4968 set_bit(ICE_DOWN, pf->state); 4969 devm_kfree(dev, pf->first_sw); 4970 err_msix_misc_unroll: 4971 ice_free_irq_msix_misc(pf); 4972 err_init_interrupt_unroll: 4973 ice_clear_interrupt_scheme(pf); 4974 err_init_vsi_stats_unroll: 4975 devm_kfree(dev, pf->vsi_stats); 4976 pf->vsi_stats = NULL; 4977 err_init_vsi_unroll: 4978 devm_kfree(dev, pf->vsi); 4979 err_init_pf_unroll: 4980 ice_deinit_pf(pf); 4981 ice_devlink_destroy_regions(pf); 4982 ice_deinit_hw(hw); 4983 err_exit_unroll: 4984 pci_disable_pcie_error_reporting(pdev); 4985 pci_disable_device(pdev); 4986 return err; 4987 } 4988 4989 /** 4990 * ice_set_wake - enable or disable Wake on LAN 4991 * @pf: pointer to the PF struct 4992 * 4993 * Simple helper for WoL control 4994 */ 4995 static void ice_set_wake(struct ice_pf *pf) 4996 { 4997 struct ice_hw *hw = &pf->hw; 4998 bool wol = pf->wol_ena; 4999 5000 /* clear wake state, otherwise new wake events won't fire */ 5001 wr32(hw, PFPM_WUS, U32_MAX); 5002 5003 /* enable / disable APM wake up, no RMW needed */ 5004 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 5005 5006 /* set magic packet filter enabled */ 5007 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 5008 } 5009 5010 /** 5011 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 5012 * @pf: pointer to the PF struct 5013 * 5014 * Issue firmware command to enable multicast magic wake, making 5015 * sure that any locally administered address (LAA) is used for 5016 * wake, and that PF reset doesn't undo the LAA. 
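* Called from the remove and suspend paths (see ice_remove() and ice_suspend() below) so the wake configuration is programmed before the driver releases the device.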
5017 */ 5018 static void ice_setup_mc_magic_wake(struct ice_pf *pf) 5019 { 5020 struct device *dev = ice_pf_to_dev(pf); 5021 struct ice_hw *hw = &pf->hw; 5022 u8 mac_addr[ETH_ALEN]; 5023 struct ice_vsi *vsi; 5024 int status; 5025 u8 flags; 5026 5027 if (!pf->wol_ena) 5028 return; 5029 5030 vsi = ice_get_main_vsi(pf); 5031 if (!vsi) 5032 return; 5033 5034 /* Get current MAC address in case it's an LAA */ 5035 if (vsi->netdev) 5036 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 5037 else 5038 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 5039 5040 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 5041 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 5042 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 5043 5044 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 5045 if (status) 5046 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", 5047 status, ice_aq_str(hw->adminq.sq_last_status)); 5048 } 5049 5050 /** 5051 * ice_remove - Device removal routine 5052 * @pdev: PCI device information struct 5053 */ 5054 static void ice_remove(struct pci_dev *pdev) 5055 { 5056 struct ice_pf *pf = pci_get_drvdata(pdev); 5057 struct ice_hw *hw; 5058 int i; 5059 5060 hw = &pf->hw; 5061 5062 ice_devlink_unregister(pf); 5063 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 5064 if (!ice_is_reset_in_progress(pf->state)) 5065 break; 5066 msleep(100); 5067 } 5068 5069 ice_tc_indir_block_remove(pf); 5070 5071 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { 5072 set_bit(ICE_VF_RESETS_DISABLED, pf->state); 5073 ice_free_vfs(pf); 5074 } 5075 5076 ice_service_task_stop(pf); 5077 5078 ice_aq_cancel_waiting_tasks(pf); 5079 ice_unplug_aux_dev(pf); 5080 if (pf->aux_idx >= 0) 5081 ida_free(&ice_aux_ida, pf->aux_idx); 5082 ice_devlink_unregister_params(pf); 5083 set_bit(ICE_DOWN, pf->state); 5084 5085 ice_deinit_lag(pf); 5086 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 5087 ice_ptp_release(pf); 5088 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 5089 ice_gnss_exit(pf); 5090 if (!ice_is_safe_mode(pf)) 5091 ice_remove_arfs(pf); 5092 ice_setup_mc_magic_wake(pf); 5093 ice_vsi_release_all(pf); 5094 mutex_destroy(&hw->fdir_fltr_lock); 5095 ice_devlink_destroy_pf_port(pf); 5096 ice_set_wake(pf); 5097 ice_free_irq_msix_misc(pf); 5098 ice_for_each_vsi(pf, i) { 5099 if (!pf->vsi[i]) 5100 continue; 5101 ice_vsi_free_q_vectors(pf->vsi[i]); 5102 } 5103 devm_kfree(&pdev->dev, pf->vsi_stats); 5104 pf->vsi_stats = NULL; 5105 ice_deinit_pf(pf); 5106 ice_devlink_destroy_regions(pf); 5107 ice_deinit_hw(hw); 5108 5109 /* Issue a PFR as part of the prescribed driver unload flow. Do not 5110 * do it via ice_schedule_reset() since there is no need to rebuild 5111 * and the service task is already stopped. 
5112 */ 5113 ice_reset(hw, ICE_RESET_PFR); 5114 pci_wait_for_pending_transaction(pdev); 5115 ice_clear_interrupt_scheme(pf); 5116 pci_disable_pcie_error_reporting(pdev); 5117 pci_disable_device(pdev); 5118 } 5119 5120 /** 5121 * ice_shutdown - PCI callback for shutting down device 5122 * @pdev: PCI device information struct 5123 */ 5124 static void ice_shutdown(struct pci_dev *pdev) 5125 { 5126 struct ice_pf *pf = pci_get_drvdata(pdev); 5127 5128 ice_remove(pdev); 5129 5130 if (system_state == SYSTEM_POWER_OFF) { 5131 pci_wake_from_d3(pdev, pf->wol_ena); 5132 pci_set_power_state(pdev, PCI_D3hot); 5133 } 5134 } 5135 5136 #ifdef CONFIG_PM 5137 /** 5138 * ice_prepare_for_shutdown - prep for PCI shutdown 5139 * @pf: board private structure 5140 * 5141 * Inform or close all dependent features in prep for PCI device shutdown 5142 */ 5143 static void ice_prepare_for_shutdown(struct ice_pf *pf) 5144 { 5145 struct ice_hw *hw = &pf->hw; 5146 u32 v; 5147 5148 /* Notify VFs of impending reset */ 5149 if (ice_check_sq_alive(hw, &hw->mailboxq)) 5150 ice_vc_notify_reset(pf); 5151 5152 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); 5153 5154 /* disable the VSIs and their queues that are not already DOWN */ 5155 ice_pf_dis_all_vsi(pf, false); 5156 5157 ice_for_each_vsi(pf, v) 5158 if (pf->vsi[v]) 5159 pf->vsi[v]->vsi_num = 0; 5160 5161 ice_shutdown_all_ctrlq(hw); 5162 } 5163 5164 /** 5165 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme 5166 * @pf: board private structure to reinitialize 5167 * 5168 * This routine reinitializes the interrupt scheme that was cleared during the 5169 * power management suspend callback. 5170 * 5171 * This should be called during the resume routine to re-allocate the q_vectors 5172 * and reacquire interrupts. 5173 */ 5174 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) 5175 { 5176 struct device *dev = ice_pf_to_dev(pf); 5177 int ret, v; 5178 5179 /* Since we clear MSIX flag during suspend, we need to 5180 * set it back during resume... 5181 */ 5182 5183 ret = ice_init_interrupt_scheme(pf); 5184 if (ret) { 5185 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); 5186 return ret; 5187 } 5188 5189 /* Remap vectors and rings after interrupts have been successfully re-initialized */ 5190 ice_for_each_vsi(pf, v) { 5191 if (!pf->vsi[v]) 5192 continue; 5193 5194 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); 5195 if (ret) 5196 goto err_reinit; 5197 ice_vsi_map_rings_to_vectors(pf->vsi[v]); 5198 } 5199 5200 ret = ice_req_irq_msix_misc(pf); 5201 if (ret) { 5202 dev_err(dev, "Setting up misc vector failed after device suspend %d\n", 5203 ret); 5204 goto err_reinit; 5205 } 5206 5207 return 0; 5208 5209 err_reinit: 5210 while (v--) 5211 if (pf->vsi[v]) 5212 ice_vsi_free_q_vectors(pf->vsi[v]); 5213 5214 return ret; 5215 } 5216 5217 /** 5218 * ice_suspend - PM callback for suspending the device 5219 * @dev: generic device information structure 5220 * 5221 * Power Management callback to quiesce the device and prepare 5222 * for D3 transition. 5223 */ 5224 static int __maybe_unused ice_suspend(struct device *dev) 5225 { 5226 struct pci_dev *pdev = to_pci_dev(dev); 5227 struct ice_pf *pf; 5228 int disabled, v; 5229 5230 pf = pci_get_drvdata(pdev); 5231 5232 if (!ice_pf_state_is_nominal(pf)) { 5233 dev_err(dev, "Device is not ready, no need to suspend it\n"); 5234 return -EBUSY; 5235 } 5236 5237 /* Stop watchdog tasks until resume completion.
5238 * Even though it is most likely that the service task is 5239 * disabled if the device is suspended or down, the service task's 5240 * state is controlled by a different state bit, and we should 5241 * store and honor whatever state that bit is in at this point. 5242 */ 5243 disabled = ice_service_task_stop(pf); 5244 5245 ice_unplug_aux_dev(pf); 5246 5247 /* Already suspended? Then there is nothing to do */ 5248 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { 5249 if (!disabled) 5250 ice_service_task_restart(pf); 5251 return 0; 5252 } 5253 5254 if (test_bit(ICE_DOWN, pf->state) || 5255 ice_is_reset_in_progress(pf->state)) { 5256 dev_err(dev, "can't suspend device in reset or already down\n"); 5257 if (!disabled) 5258 ice_service_task_restart(pf); 5259 return 0; 5260 } 5261 5262 ice_setup_mc_magic_wake(pf); 5263 5264 ice_prepare_for_shutdown(pf); 5265 5266 ice_set_wake(pf); 5267 5268 /* Free vectors, clear the interrupt scheme and release IRQs 5269 * for proper hibernation, especially with a large number of CPUs. 5270 * Otherwise hibernation might fail when mapping all the vectors back 5271 * to CPU0. 5272 */ 5273 ice_free_irq_msix_misc(pf); 5274 ice_for_each_vsi(pf, v) { 5275 if (!pf->vsi[v]) 5276 continue; 5277 ice_vsi_free_q_vectors(pf->vsi[v]); 5278 } 5279 ice_clear_interrupt_scheme(pf); 5280 5281 pci_save_state(pdev); 5282 pci_wake_from_d3(pdev, pf->wol_ena); 5283 pci_set_power_state(pdev, PCI_D3hot); 5284 return 0; 5285 } 5286 5287 /** 5288 * ice_resume - PM callback for waking up from D3 5289 * @dev: generic device information structure 5290 */ 5291 static int __maybe_unused ice_resume(struct device *dev) 5292 { 5293 struct pci_dev *pdev = to_pci_dev(dev); 5294 enum ice_reset_req reset_type; 5295 struct ice_pf *pf; 5296 struct ice_hw *hw; 5297 int ret; 5298 5299 pci_set_power_state(pdev, PCI_D0); 5300 pci_restore_state(pdev); 5301 pci_save_state(pdev); 5302 5303 if (!pci_device_is_present(pdev)) 5304 return -ENODEV; 5305 5306 ret = pci_enable_device_mem(pdev); 5307 if (ret) { 5308 dev_err(dev, "Cannot enable device after suspend\n"); 5309 return ret; 5310 } 5311 5312 pf = pci_get_drvdata(pdev); 5313 hw = &pf->hw; 5314 5315 pf->wakeup_reason = rd32(hw, PFPM_WUS); 5316 ice_print_wake_reason(pf); 5317 5318 /* We cleared the interrupt scheme when we suspended, so we need to 5319 * restore it now to resume device functionality. 5320 */ 5321 ret = ice_reinit_interrupt_scheme(pf); 5322 if (ret) 5323 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); 5324 5325 clear_bit(ICE_DOWN, pf->state); 5326 /* Now perform PF reset and rebuild */ 5327 reset_type = ICE_RESET_PFR; 5328 /* re-enable service task for reset, but allow reset to schedule it */ 5329 clear_bit(ICE_SERVICE_DIS, pf->state); 5330 5331 if (ice_schedule_reset(pf, reset_type)) 5332 dev_err(dev, "Reset during resume failed.\n"); 5333 5334 clear_bit(ICE_SUSPENDED, pf->state); 5335 ice_service_task_restart(pf); 5336 5337 /* Restart the service timer */ 5338 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5339 5340 return 0; 5341 } 5342 #endif /* CONFIG_PM */ 5343 5344 /** 5345 * ice_pci_err_detected - warning that PCI error has been detected 5346 * @pdev: PCI device information struct 5347 * @err: the type of PCI error 5348 * 5349 * Called to warn that something happened on the PCI bus and the error handling 5350 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
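* Returns PCI_ERS_RESULT_NEED_RESET so the PCI core follows up with the slot_reset callback, or PCI_ERS_RESULT_DISCONNECT when no driver private data is attached to the device.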
5351 */ 5352 static pci_ers_result_t 5353 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) 5354 { 5355 struct ice_pf *pf = pci_get_drvdata(pdev); 5356 5357 if (!pf) { 5358 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 5359 __func__, err); 5360 return PCI_ERS_RESULT_DISCONNECT; 5361 } 5362 5363 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5364 ice_service_task_stop(pf); 5365 5366 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5367 set_bit(ICE_PFR_REQ, pf->state); 5368 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5369 } 5370 } 5371 5372 return PCI_ERS_RESULT_NEED_RESET; 5373 } 5374 5375 /** 5376 * ice_pci_err_slot_reset - a PCI slot reset has just happened 5377 * @pdev: PCI device information struct 5378 * 5379 * Called to determine if the driver can recover from the PCI slot reset by 5380 * using a register read to determine if the device is recoverable. 5381 */ 5382 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 5383 { 5384 struct ice_pf *pf = pci_get_drvdata(pdev); 5385 pci_ers_result_t result; 5386 int err; 5387 u32 reg; 5388 5389 err = pci_enable_device_mem(pdev); 5390 if (err) { 5391 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 5392 err); 5393 result = PCI_ERS_RESULT_DISCONNECT; 5394 } else { 5395 pci_set_master(pdev); 5396 pci_restore_state(pdev); 5397 pci_save_state(pdev); 5398 pci_wake_from_d3(pdev, false); 5399 5400 /* Check for life */ 5401 reg = rd32(&pf->hw, GLGEN_RTRIG); 5402 if (!reg) 5403 result = PCI_ERS_RESULT_RECOVERED; 5404 else 5405 result = PCI_ERS_RESULT_DISCONNECT; 5406 } 5407 5408 return result; 5409 } 5410 5411 /** 5412 * ice_pci_err_resume - restart operations after PCI error recovery 5413 * @pdev: PCI device information struct 5414 * 5415 * Called to allow the driver to bring things back up after PCI error and/or 5416 * reset recovery have finished 5417 */ 5418 static void ice_pci_err_resume(struct pci_dev *pdev) 5419 { 5420 struct ice_pf *pf = pci_get_drvdata(pdev); 5421 5422 if (!pf) { 5423 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 5424 __func__); 5425 return; 5426 } 5427 5428 if (test_bit(ICE_SUSPENDED, pf->state)) { 5429 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 5430 __func__); 5431 return; 5432 } 5433 5434 ice_restore_all_vfs_msi_state(pdev); 5435 5436 ice_do_reset(pf, ICE_RESET_PFR); 5437 ice_service_task_restart(pf); 5438 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5439 } 5440 5441 /** 5442 * ice_pci_err_reset_prepare - prepare device driver for PCI reset 5443 * @pdev: PCI device information struct 5444 */ 5445 static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 5446 { 5447 struct ice_pf *pf = pci_get_drvdata(pdev); 5448 5449 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5450 ice_service_task_stop(pf); 5451 5452 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5453 set_bit(ICE_PFR_REQ, pf->state); 5454 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5455 } 5456 } 5457 } 5458 5459 /** 5460 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 5461 * @pdev: PCI device information struct 5462 */ 5463 static void ice_pci_err_reset_done(struct pci_dev *pdev) 5464 { 5465 ice_pci_err_resume(pdev); 5466 } 5467 5468 /* ice_pci_tbl - PCI Device ID Table 5469 * 5470 * Wildcard entries (PCI_ANY_ID) should come last 5471 * Last entry must be all 0s 5472 * 5473 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 5474 * Class, Class Mask, private data (not used) } 5475 */ 5476 static const struct 
pci_device_id ice_pci_tbl[] = { 5477 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 5478 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 5479 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 5480 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 }, 5481 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 }, 5482 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, 5483 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, 5484 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, 5485 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, 5486 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, 5487 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, 5488 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, 5489 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, 5490 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, 5491 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, 5492 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, 5493 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, 5494 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, 5495 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, 5496 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, 5497 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, 5498 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, 5499 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, 5500 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, 5501 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, 5502 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 }, 5503 /* required last entry */ 5504 { 0, } 5505 }; 5506 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 5507 5508 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); 5509 5510 static const struct pci_error_handlers ice_pci_err_handler = { 5511 .error_detected = ice_pci_err_detected, 5512 .slot_reset = ice_pci_err_slot_reset, 5513 .reset_prepare = ice_pci_err_reset_prepare, 5514 .reset_done = ice_pci_err_reset_done, 5515 .resume = ice_pci_err_resume 5516 }; 5517 5518 static struct pci_driver ice_driver = { 5519 .name = KBUILD_MODNAME, 5520 .id_table = ice_pci_tbl, 5521 .probe = ice_probe, 5522 .remove = ice_remove, 5523 #ifdef CONFIG_PM 5524 .driver.pm = &ice_pm_ops, 5525 #endif /* CONFIG_PM */ 5526 .shutdown = ice_shutdown, 5527 .sriov_configure = ice_sriov_configure, 5528 .err_handler = &ice_pci_err_handler 5529 }; 5530 5531 /** 5532 * ice_module_init - Driver registration routine 5533 * 5534 * ice_module_init is the first routine called when the driver is 5535 * loaded. All it does is register with the PCI subsystem. 5536 */ 5537 static int __init ice_module_init(void) 5538 { 5539 int status; 5540 5541 pr_info("%s\n", ice_driver_string); 5542 pr_info("%s\n", ice_copyright); 5543 5544 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 5545 if (!ice_wq) { 5546 pr_err("Failed to create workqueue\n"); 5547 return -ENOMEM; 5548 } 5549 5550 status = pci_register_driver(&ice_driver); 5551 if (status) { 5552 pr_err("failed to register PCI driver, err %d\n", status); 5553 destroy_workqueue(ice_wq); 5554 } 5555 5556 return status; 5557 } 5558 module_init(ice_module_init); 5559 5560 /** 5561 * ice_module_exit - Driver exit cleanup routine 5562 * 5563 * ice_module_exit is called just before the driver is removed 5564 * from memory. 
5565 */ 5566 static void __exit ice_module_exit(void) 5567 { 5568 pci_unregister_driver(&ice_driver); 5569 destroy_workqueue(ice_wq); 5570 pr_info("module unloaded\n"); 5571 } 5572 module_exit(ice_module_exit); 5573 5574 /** 5575 * ice_set_mac_address - NDO callback to set MAC address 5576 * @netdev: network interface device structure 5577 * @pi: pointer to an address structure 5578 * 5579 * Returns 0 on success, negative on failure 5580 */ 5581 static int ice_set_mac_address(struct net_device *netdev, void *pi) 5582 { 5583 struct ice_netdev_priv *np = netdev_priv(netdev); 5584 struct ice_vsi *vsi = np->vsi; 5585 struct ice_pf *pf = vsi->back; 5586 struct ice_hw *hw = &pf->hw; 5587 struct sockaddr *addr = pi; 5588 u8 old_mac[ETH_ALEN]; 5589 u8 flags = 0; 5590 u8 *mac; 5591 int err; 5592 5593 mac = (u8 *)addr->sa_data; 5594 5595 if (!is_valid_ether_addr(mac)) 5596 return -EADDRNOTAVAIL; 5597 5598 if (ether_addr_equal(netdev->dev_addr, mac)) { 5599 netdev_dbg(netdev, "already using mac %pM\n", mac); 5600 return 0; 5601 } 5602 5603 if (test_bit(ICE_DOWN, pf->state) || 5604 ice_is_reset_in_progress(pf->state)) { 5605 netdev_err(netdev, "can't set mac %pM. device not ready\n", 5606 mac); 5607 return -EBUSY; 5608 } 5609 5610 if (ice_chnl_dmac_fltr_cnt(pf)) { 5611 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", 5612 mac); 5613 return -EAGAIN; 5614 } 5615 5616 netif_addr_lock_bh(netdev); 5617 ether_addr_copy(old_mac, netdev->dev_addr); 5618 /* change the netdev's MAC address */ 5619 eth_hw_addr_set(netdev, mac); 5620 netif_addr_unlock_bh(netdev); 5621 5622 /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 5623 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); 5624 if (err && err != -ENOENT) { 5625 err = -EADDRNOTAVAIL; 5626 goto err_update_filters; 5627 } 5628 5629 /* Add filter for new MAC. If filter exists, return success */ 5630 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 5631 if (err == -EEXIST) { 5632 /* Although this MAC filter is already present in hardware it's 5633 * possible in some cases (e.g. bonding) that dev_addr was 5634 * modified outside of the driver and needs to be restored back 5635 * to this value. 5636 */ 5637 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 5638 5639 return 0; 5640 } else if (err) { 5641 /* error if the new filter addition failed */ 5642 err = -EADDRNOTAVAIL; 5643 } 5644 5645 err_update_filters: 5646 if (err) { 5647 netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 5648 mac); 5649 netif_addr_lock_bh(netdev); 5650 eth_hw_addr_set(netdev, old_mac); 5651 netif_addr_unlock_bh(netdev); 5652 return err; 5653 } 5654 5655 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 5656 netdev->dev_addr); 5657 5658 /* write new MAC address to the firmware */ 5659 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 5660 err = ice_aq_manage_mac_write(hw, mac, flags, NULL); 5661 if (err) { 5662 netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %d\n", 5663 mac, err); 5664 } 5665 return 0; 5666 } 5667 5668 /** 5669 * ice_set_rx_mode - NDO callback to set the netdev filters 5670 * @netdev: network interface device structure 5671 */ 5672 static void ice_set_rx_mode(struct net_device *netdev) 5673 { 5674 struct ice_netdev_priv *np = netdev_priv(netdev); 5675 struct ice_vsi *vsi = np->vsi; 5676 5677 if (!vsi) 5678 return; 5679 5680 /* Set the flags to synchronize filters 5681 * ndo_set_rx_mode may be triggered even without a change in netdev 5682 * flags 5683 */ 5684 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 5685 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 5686 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 5687 5688 /* schedule our worker thread which will take care of 5689 * applying the new filter changes 5690 */ 5691 ice_service_task_schedule(vsi->back); 5692 } 5693 5694 /** 5695 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 5696 * @netdev: network interface device structure 5697 * @queue_index: Queue ID 5698 * @maxrate: maximum bandwidth in Mbps 5699 */ 5700 static int 5701 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 5702 { 5703 struct ice_netdev_priv *np = netdev_priv(netdev); 5704 struct ice_vsi *vsi = np->vsi; 5705 u16 q_handle; 5706 int status; 5707 u8 tc; 5708 5709 /* Validate maxrate requested is within permitted range */ 5710 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 5711 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 5712 maxrate, queue_index); 5713 return -EINVAL; 5714 } 5715 5716 q_handle = vsi->tx_rings[queue_index]->q_handle; 5717 tc = ice_dcb_get_tc(vsi, queue_index); 5718 5719 /* Set BW back to default, when user set maxrate to 0 */ 5720 if (!maxrate) 5721 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 5722 q_handle, ICE_MAX_BW); 5723 else 5724 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 5725 q_handle, ICE_MAX_BW, maxrate * 1000); 5726 if (status) 5727 netdev_err(netdev, "Unable to set Tx max rate, error %d\n", 5728 status); 5729 5730 return status; 5731 } 5732 5733 /** 5734 * ice_fdb_add - add an entry to the hardware database 5735 * @ndm: the input from the stack 5736 * @tb: pointer to array of nladdr (unused) 5737 * @dev: the net device pointer 5738 * @addr: the MAC address entry being added 5739 * @vid: VLAN ID 5740 * @flags: instructions from stack about fdb operation 5741 * @extack: netlink extended ack 5742 */ 5743 static int 5744 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 5745 struct net_device *dev, const unsigned char *addr, u16 vid, 5746 u16 flags, struct netlink_ext_ack __always_unused *extack) 5747 { 5748 int err; 5749 5750 if (vid) { 5751 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 5752 return -EINVAL; 5753 } 5754 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 5755 netdev_err(dev, "FDB only supports static addresses\n"); 5756 return -EINVAL; 5757 } 5758 5759 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 5760 err = dev_uc_add_excl(dev, addr); 5761 else if (is_multicast_ether_addr(addr)) 5762 err = dev_mc_add_excl(dev, addr); 5763 else 5764 err = -EINVAL; 5765 5766 /* Only return duplicate errors if NLM_F_EXCL is set */ 5767 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 5768 err = 0; 5769 5770 return err; 5771 } 5772 5773 /** 5774 * ice_fdb_del - delete an entry from the hardware database 5775 * @ndm: the input from the stack 5776 * @tb: pointer to array of 
nladdr (unused)
5777 * @dev: the net device pointer
5778 * @addr: the MAC address entry being removed
5779 * @vid: VLAN ID
5780 * @extack: netlink extended ack
5781 */
5782 static int
5783 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5784 struct net_device *dev, const unsigned char *addr,
5785 __always_unused u16 vid, struct netlink_ext_ack *extack)
5786 {
5787 int err;
5788
5789 if (ndm->ndm_state & NUD_PERMANENT) {
5790 netdev_err(dev, "FDB only supports static addresses\n");
5791 return -EINVAL;
5792 }
5793
5794 if (is_unicast_ether_addr(addr))
5795 err = dev_uc_del(dev, addr);
5796 else if (is_multicast_ether_addr(addr))
5797 err = dev_mc_del(dev, addr);
5798 else
5799 err = -EINVAL;
5800
5801 return err;
5802 }
5803
5804 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5805 NETIF_F_HW_VLAN_CTAG_TX | \
5806 NETIF_F_HW_VLAN_STAG_RX | \
5807 NETIF_F_HW_VLAN_STAG_TX)
5808
5809 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5810 NETIF_F_HW_VLAN_STAG_RX)
5811
5812 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5813 NETIF_F_HW_VLAN_STAG_FILTER)
5814
5815 /**
5816 * ice_fix_features - fix the netdev features flags based on device limitations
5817 * @netdev: ptr to the netdev that flags are being fixed on
5818 * @features: features that need to be checked and possibly fixed
5819 *
5820 * Make sure any fixups are made to features in this callback. This enables the
5821 * driver to not have to check unsupported configurations throughout the driver
5822 * because that's the responsibility of this callback.
5823 *
5824 * Single VLAN Mode (SVM) Supported Features:
5825 * NETIF_F_HW_VLAN_CTAG_FILTER
5826 * NETIF_F_HW_VLAN_CTAG_RX
5827 * NETIF_F_HW_VLAN_CTAG_TX
5828 *
5829 * Double VLAN Mode (DVM) Supported Features:
5830 * NETIF_F_HW_VLAN_CTAG_FILTER
5831 * NETIF_F_HW_VLAN_CTAG_RX
5832 * NETIF_F_HW_VLAN_CTAG_TX
5833 *
5834 * NETIF_F_HW_VLAN_STAG_FILTER
5835 * NETIF_F_HW_VLAN_STAG_RX
5836 * NETIF_F_HW_VLAN_STAG_TX
5837 *
5838 * Features that need fixing:
5839 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5840 * These are mutually exclusive as the VSI context cannot support multiple
5841 * VLAN ethertypes simultaneously for stripping and/or insertion. If such a
5842 * request is made, default to clearing the requested STAG offload
5843 * settings.
5844 *
5845 * All supported filtering has to be enabled or disabled together. For
5846 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5847 * together. If this is not the case, default to VLAN filtering disabled.
5848 * These are mutually exclusive as there is currently no way to
5849 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5850 * prune rules.
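 *
 * Illustrative example (requested feature values assumed, not taken from a
 * real request): if userspace asks for
 *
 *	features = NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
 *
 * the stripping check at the bottom of this function sees both a CTAG and
 * an STAG stripping bit set and clears the STAG bits, so the mask returned
 * to the stack keeps only NETIF_F_HW_VLAN_CTAG_RX.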
5851 */ 5852 static netdev_features_t 5853 ice_fix_features(struct net_device *netdev, netdev_features_t features) 5854 { 5855 struct ice_netdev_priv *np = netdev_priv(netdev); 5856 netdev_features_t req_vlan_fltr, cur_vlan_fltr; 5857 bool cur_ctag, cur_stag, req_ctag, req_stag; 5858 5859 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; 5860 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; 5861 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; 5862 5863 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; 5864 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; 5865 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; 5866 5867 if (req_vlan_fltr != cur_vlan_fltr) { 5868 if (ice_is_dvm_ena(&np->vsi->back->hw)) { 5869 if (req_ctag && req_stag) { 5870 features |= NETIF_VLAN_FILTERING_FEATURES; 5871 } else if (!req_ctag && !req_stag) { 5872 features &= ~NETIF_VLAN_FILTERING_FEATURES; 5873 } else if ((!cur_ctag && req_ctag && !cur_stag) || 5874 (!cur_stag && req_stag && !cur_ctag)) { 5875 features |= NETIF_VLAN_FILTERING_FEATURES; 5876 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); 5877 } else if ((cur_ctag && !req_ctag && cur_stag) || 5878 (cur_stag && !req_stag && cur_ctag)) { 5879 features &= ~NETIF_VLAN_FILTERING_FEATURES; 5880 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); 5881 } 5882 } else { 5883 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) 5884 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); 5885 5886 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) 5887 features |= NETIF_F_HW_VLAN_CTAG_FILTER; 5888 } 5889 } 5890 5891 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && 5892 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { 5893 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); 5894 features &= ~(NETIF_F_HW_VLAN_STAG_RX | 5895 NETIF_F_HW_VLAN_STAG_TX); 5896 } 5897 5898 if (!(netdev->features & NETIF_F_RXFCS) && 5899 (features & NETIF_F_RXFCS) && 5900 (features & NETIF_VLAN_STRIPPING_FEATURES) && 5901 !ice_vsi_has_non_zero_vlans(np->vsi)) { 5902 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); 5903 features &= ~NETIF_VLAN_STRIPPING_FEATURES; 5904 } 5905 5906 return features; 5907 } 5908 5909 /** 5910 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI 5911 * @vsi: PF's VSI 5912 * @features: features used to determine VLAN offload settings 5913 * 5914 * First, determine the vlan_ethertype based on the VLAN offload bits in 5915 * features. Then determine if stripping and insertion should be enabled or 5916 * disabled. Finally enable or disable VLAN stripping and insertion. 
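 *
 * A worked example (requested bits assumed for illustration): with
 * NETIF_F_HW_VLAN_STAG_RX set, stripping is enabled with
 * vlan_ethertype == ETH_P_8021AD (0x88A8); if only the CTAG bits are set,
 * it would be ETH_P_8021Q (0x8100). The STAG bits are tested first, so
 * STAG offloads take precedence when both tag types are requested.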
5917 */ 5918 static int 5919 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) 5920 { 5921 bool enable_stripping = true, enable_insertion = true; 5922 struct ice_vsi_vlan_ops *vlan_ops; 5923 int strip_err = 0, insert_err = 0; 5924 u16 vlan_ethertype = 0; 5925 5926 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 5927 5928 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 5929 vlan_ethertype = ETH_P_8021AD; 5930 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 5931 vlan_ethertype = ETH_P_8021Q; 5932 5933 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) 5934 enable_stripping = false; 5935 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) 5936 enable_insertion = false; 5937 5938 if (enable_stripping) 5939 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); 5940 else 5941 strip_err = vlan_ops->dis_stripping(vsi); 5942 5943 if (enable_insertion) 5944 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); 5945 else 5946 insert_err = vlan_ops->dis_insertion(vsi); 5947 5948 if (strip_err || insert_err) 5949 return -EIO; 5950 5951 return 0; 5952 } 5953 5954 /** 5955 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI 5956 * @vsi: PF's VSI 5957 * @features: features used to determine VLAN filtering settings 5958 * 5959 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the 5960 * features. 5961 */ 5962 static int 5963 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) 5964 { 5965 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 5966 int err = 0; 5967 5968 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking 5969 * if either bit is set 5970 */ 5971 if (features & 5972 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) 5973 err = vlan_ops->ena_rx_filtering(vsi); 5974 else 5975 err = vlan_ops->dis_rx_filtering(vsi); 5976 5977 return err; 5978 } 5979 5980 /** 5981 * ice_set_vlan_features - set VLAN settings based on suggested feature set 5982 * @netdev: ptr to the netdev being adjusted 5983 * @features: the feature set that the stack is suggesting 5984 * 5985 * Only update VLAN settings if the requested_vlan_features are different than 5986 * the current_vlan_features. 
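 *
 * For example (hypothetical feature sets): if the requested set differs
 * from netdev->features only in NETIF_F_HW_VLAN_CTAG_FILTER, the
 * stripping/insertion path is skipped entirely and only
 * ice_set_vlan_filtering_features() is called.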
5987 */ 5988 static int 5989 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) 5990 { 5991 netdev_features_t current_vlan_features, requested_vlan_features; 5992 struct ice_netdev_priv *np = netdev_priv(netdev); 5993 struct ice_vsi *vsi = np->vsi; 5994 int err; 5995 5996 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; 5997 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; 5998 if (current_vlan_features ^ requested_vlan_features) { 5999 if ((features & NETIF_F_RXFCS) && 6000 (features & NETIF_VLAN_STRIPPING_FEATURES)) { 6001 dev_err(ice_pf_to_dev(vsi->back), 6002 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); 6003 return -EIO; 6004 } 6005 6006 err = ice_set_vlan_offload_features(vsi, features); 6007 if (err) 6008 return err; 6009 } 6010 6011 current_vlan_features = netdev->features & 6012 NETIF_VLAN_FILTERING_FEATURES; 6013 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES; 6014 if (current_vlan_features ^ requested_vlan_features) { 6015 err = ice_set_vlan_filtering_features(vsi, features); 6016 if (err) 6017 return err; 6018 } 6019 6020 return 0; 6021 } 6022 6023 /** 6024 * ice_set_loopback - turn on/off loopback mode on underlying PF 6025 * @vsi: ptr to VSI 6026 * @ena: flag to indicate the on/off setting 6027 */ 6028 static int ice_set_loopback(struct ice_vsi *vsi, bool ena) 6029 { 6030 bool if_running = netif_running(vsi->netdev); 6031 int ret; 6032 6033 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 6034 ret = ice_down(vsi); 6035 if (ret) { 6036 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); 6037 return ret; 6038 } 6039 } 6040 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); 6041 if (ret) 6042 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); 6043 if (if_running) 6044 ret = ice_up(vsi); 6045 6046 return ret; 6047 } 6048 6049 /** 6050 * ice_set_features - set the netdev feature flags 6051 * @netdev: ptr to the netdev being adjusted 6052 * @features: the feature set that the stack is suggesting 6053 */ 6054 static int 6055 ice_set_features(struct net_device *netdev, netdev_features_t features) 6056 { 6057 netdev_features_t changed = netdev->features ^ features; 6058 struct ice_netdev_priv *np = netdev_priv(netdev); 6059 struct ice_vsi *vsi = np->vsi; 6060 struct ice_pf *pf = vsi->back; 6061 int ret = 0; 6062 6063 /* Don't set any netdev advanced features with device in Safe Mode */ 6064 if (ice_is_safe_mode(pf)) { 6065 dev_err(ice_pf_to_dev(pf), 6066 "Device is in Safe Mode - not enabling advanced netdev features\n"); 6067 return ret; 6068 } 6069 6070 /* Do not change setting during reset */ 6071 if (ice_is_reset_in_progress(pf->state)) { 6072 dev_err(ice_pf_to_dev(pf), 6073 "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 6074 return -EBUSY; 6075 } 6076 6077 /* Multiple features can be changed in one call so keep features in 6078 * separate if/else statements to guarantee each feature is checked 6079 */ 6080 if (changed & NETIF_F_RXHASH) 6081 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); 6082 6083 ret = ice_set_vlan_features(netdev, features); 6084 if (ret) 6085 return ret; 6086 6087 /* Turn on receive of FCS aka CRC, and after setting this 6088 * flag the packet data will have the 4 byte CRC appended 6089 */ 6090 if (changed & NETIF_F_RXFCS) { 6091 if ((features & NETIF_F_RXFCS) && 6092 (features & NETIF_VLAN_STRIPPING_FEATURES)) { 6093 
dev_err(ice_pf_to_dev(vsi->back),
6094 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6095 return -EIO;
6096 }
6097
6098 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6099 ret = ice_down_up(vsi);
6100 if (ret)
6101 return ret;
6102 }
6103
6104 if (changed & NETIF_F_NTUPLE) {
6105 bool ena = !!(features & NETIF_F_NTUPLE);
6106
6107 ice_vsi_manage_fdir(vsi, ena);
6108 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6109 }
6110
6111 /* don't turn off hw_tc_offload when ADQ is already enabled */
6112 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6113 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6114 return -EACCES;
6115 }
6116
6117 if (changed & NETIF_F_HW_TC) {
6118 bool ena = !!(features & NETIF_F_HW_TC);
6119
6120 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6121 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6122 }
6123
6124 if (changed & NETIF_F_LOOPBACK)
6125 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6126
6127 return ret;
6128 }
6129
6130 /**
6131 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6132 * @vsi: VSI to setup VLAN properties for
6133 */
6134 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6135 {
6136 int err;
6137
6138 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6139 if (err)
6140 return err;
6141
6142 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6143 if (err)
6144 return err;
6145
6146 return ice_vsi_add_vlan_zero(vsi);
6147 }
6148
6149 /**
6150 * ice_vsi_cfg - Setup the VSI
6151 * @vsi: the VSI being configured
6152 *
6153 * Return 0 on success and negative value on error
6154 */
6155 int ice_vsi_cfg(struct ice_vsi *vsi)
6156 {
6157 int err;
6158
6159 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6160 ice_set_rx_mode(vsi->netdev);
6161
6162 err = ice_vsi_vlan_setup(vsi);
6163 if (err)
6164 return err;
6165 }
6166 ice_vsi_cfg_dcb_rings(vsi);
6167
6168 err = ice_vsi_cfg_lan_txqs(vsi);
6169 if (!err && ice_is_xdp_ena_vsi(vsi))
6170 err = ice_vsi_cfg_xdp_txqs(vsi);
6171 if (!err)
6172 err = ice_vsi_cfg_rxqs(vsi);
6173
6174 return err;
6175 }
6176
6177 /* THEORY OF MODERATION:
6178 * The ice driver hardware works differently from the hardware that DIMLIB was
6179 * originally made for. ice hardware doesn't have packet count limits that
6180 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6181 * which is hard-coded to a limit of 250,000 ints/second.
6182 * If not using dynamic moderation, the INTRL value can be modified
6183 * by ethtool rx-usecs-high.
6184 */
6185 struct ice_dim {
6186 /* the throttle rate for interrupts, basically worst case delay before
6187 * an initial interrupt fires, value is stored in microseconds.
6188 */
6189 u16 itr;
6190 };
6191
6192 /* Make a different profile for Rx that doesn't allow quite so aggressive
6193 * moderation at the high end (it maxes out at 126 us, or about 8k interrupts
6194 * a second).
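 *
 * As a quick sanity check on the table below: an ITR of 62 usecs allows at
 * most one interrupt every 62 us, i.e. roughly 1,000,000 / 62 ~= 16,129
 * interrupts per second, which is the fourth Rx step. The profile_ix used
 * to index these tables is stepped up or down by DIMLIB's net_dim() based
 * on observed packet and byte rates; the work handlers further below then
 * translate the new index into an ITR register write.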
6195 */
6196 static const struct ice_dim rx_profile[] = {
6197 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6198 {8}, /* 125,000 ints/s */
6199 {16}, /* 62,500 ints/s */
6200 {62}, /* 16,129 ints/s */
6201 {126} /* 7,936 ints/s */
6202 };
6203
6204 /* The transmit profile, which has the same sorts of values
6205 * as the previous struct
6206 */
6207 static const struct ice_dim tx_profile[] = {
6208 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6209 {8}, /* 125,000 ints/s */
6210 {40}, /* 25,000 ints/s */
6211 {128}, /* 7,812 ints/s */
6212 {256} /* 3,906 ints/s */
6213 };
6214
6215 static void ice_tx_dim_work(struct work_struct *work)
6216 {
6217 struct ice_ring_container *rc;
6218 struct dim *dim;
6219 u16 itr;
6220
6221 dim = container_of(work, struct dim, work);
6222 rc = (struct ice_ring_container *)dim->priv;
6223
6224 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6225
6226 /* look up the values in our local table */
6227 itr = tx_profile[dim->profile_ix].itr;
6228
6229 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6230 ice_write_itr(rc, itr);
6231
6232 dim->state = DIM_START_MEASURE;
6233 }
6234
6235 static void ice_rx_dim_work(struct work_struct *work)
6236 {
6237 struct ice_ring_container *rc;
6238 struct dim *dim;
6239 u16 itr;
6240
6241 dim = container_of(work, struct dim, work);
6242 rc = (struct ice_ring_container *)dim->priv;
6243
6244 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6245
6246 /* look up the values in our local table */
6247 itr = rx_profile[dim->profile_ix].itr;
6248
6249 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6250 ice_write_itr(rc, itr);
6251
6252 dim->state = DIM_START_MEASURE;
6253 }
6254
6255 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6256
6257 /**
6258 * ice_init_moderation - set up interrupt moderation
6259 * @q_vector: the vector containing rings to be configured
6260 *
6261 * Set up interrupt moderation registers, with the intent to do the right thing
6262 * when called from reset or from probe, whether or not dynamic moderation
6263 * is enabled. Take special care to write all the registers in both
6264 * modes in order to make sure hardware is in a known
6265 * state.
6266 */
6267 static void ice_init_moderation(struct ice_q_vector *q_vector)
6268 {
6269 struct ice_ring_container *rc;
6270 bool tx_dynamic, rx_dynamic;
6271
6272 rc = &q_vector->tx;
6273 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6274 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6275 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6276 rc->dim.priv = rc;
6277 tx_dynamic = ITR_IS_DYNAMIC(rc);
6278
6279 /* set the initial TX ITR to match the above */
6280 ice_write_itr(rc, tx_dynamic ?
6281 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6282
6283 rc = &q_vector->rx;
6284 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6285 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6286 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6287 rc->dim.priv = rc;
6288 rx_dynamic = ITR_IS_DYNAMIC(rc);
6289
6290 /* set the initial RX ITR to match the above */
6291 ice_write_itr(rc, rx_dynamic ?
rx_profile[rc->dim.profile_ix].itr :
6292 rc->itr_setting);
6293
6294 ice_set_q_vector_intrl(q_vector);
6295 }
6296
6297 /**
6298 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6299 * @vsi: the VSI being configured
6300 */
6301 static void ice_napi_enable_all(struct ice_vsi *vsi)
6302 {
6303 int q_idx;
6304
6305 if (!vsi->netdev)
6306 return;
6307
6308 ice_for_each_q_vector(vsi, q_idx) {
6309 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6310
6311 ice_init_moderation(q_vector);
6312
6313 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6314 napi_enable(&q_vector->napi);
6315 }
6316 }
6317
6318 /**
6319 * ice_up_complete - Finish the last steps of bringing up a connection
6320 * @vsi: The VSI being configured
6321 *
6322 * Return 0 on success and negative value on error
6323 */
6324 static int ice_up_complete(struct ice_vsi *vsi)
6325 {
6326 struct ice_pf *pf = vsi->back;
6327 int err;
6328
6329 ice_vsi_cfg_msix(vsi);
6330
6331 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6332 * Tx queue group list was configured and the context bits were
6333 * programmed using ice_vsi_cfg_txqs
6334 */
6335 err = ice_vsi_start_all_rx_rings(vsi);
6336 if (err)
6337 return err;
6338
6339 clear_bit(ICE_VSI_DOWN, vsi->state);
6340 ice_napi_enable_all(vsi);
6341 ice_vsi_ena_irq(vsi);
6342
6343 if (vsi->port_info &&
6344 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6345 vsi->netdev && vsi->type == ICE_VSI_PF) {
6346 ice_print_link_msg(vsi, true);
6347 netif_tx_start_all_queues(vsi->netdev);
6348 netif_carrier_on(vsi->netdev);
6349 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6350 }
6351
6352 /* Perform an initial read of the statistics registers now to
6353 * set the baseline so counters are ready when interface is up
6354 */
6355 ice_update_eth_stats(vsi);
6356
6357 if (vsi->type == ICE_VSI_PF)
6358 ice_service_task_schedule(pf);
6359
6360 return 0;
6361 }
6362
6363 /**
6364 * ice_up - Bring the connection back up after being down
6365 * @vsi: VSI being configured
6366 */
6367 int ice_up(struct ice_vsi *vsi)
6368 {
6369 int err;
6370
6371 err = ice_vsi_cfg(vsi);
6372 if (!err)
6373 err = ice_up_complete(vsi);
6374
6375 return err;
6376 }
6377
6378 /**
6379 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6380 * @syncp: pointer to u64_stats_sync
6381 * @stats: stats that pkts and bytes count will be taken from
6382 * @pkts: packets stats counter
6383 * @bytes: bytes stats counter
6384 *
6385 * This function fetches stats from the ring considering the atomic operations
6386 * that need to be performed to read u64 values on 32-bit machines.
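 *
 * A minimal usage sketch (mirrors the call made in
 * ice_update_vsi_tx_ring_stats() below; 'ring' is assumed to be a valid
 * ring whose ring_stats have been allocated):
 *
 *	u64 pkts, bytes;
 *
 *	ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
 *				     ring->ring_stats->stats, &pkts, &bytes);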
6387 */ 6388 void 6389 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, 6390 struct ice_q_stats stats, u64 *pkts, u64 *bytes) 6391 { 6392 unsigned int start; 6393 6394 do { 6395 start = u64_stats_fetch_begin(syncp); 6396 *pkts = stats.pkts; 6397 *bytes = stats.bytes; 6398 } while (u64_stats_fetch_retry(syncp, start)); 6399 } 6400 6401 /** 6402 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters 6403 * @vsi: the VSI to be updated 6404 * @vsi_stats: the stats struct to be updated 6405 * @rings: rings to work on 6406 * @count: number of rings 6407 */ 6408 static void 6409 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, 6410 struct rtnl_link_stats64 *vsi_stats, 6411 struct ice_tx_ring **rings, u16 count) 6412 { 6413 u16 i; 6414 6415 for (i = 0; i < count; i++) { 6416 struct ice_tx_ring *ring; 6417 u64 pkts = 0, bytes = 0; 6418 6419 ring = READ_ONCE(rings[i]); 6420 if (!ring || !ring->ring_stats) 6421 continue; 6422 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, 6423 ring->ring_stats->stats, &pkts, 6424 &bytes); 6425 vsi_stats->tx_packets += pkts; 6426 vsi_stats->tx_bytes += bytes; 6427 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; 6428 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; 6429 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; 6430 } 6431 } 6432 6433 /** 6434 * ice_update_vsi_ring_stats - Update VSI stats counters 6435 * @vsi: the VSI to be updated 6436 */ 6437 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 6438 { 6439 struct rtnl_link_stats64 *net_stats, *stats_prev; 6440 struct rtnl_link_stats64 *vsi_stats; 6441 u64 pkts, bytes; 6442 int i; 6443 6444 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); 6445 if (!vsi_stats) 6446 return; 6447 6448 /* reset non-netdev (extended) stats */ 6449 vsi->tx_restart = 0; 6450 vsi->tx_busy = 0; 6451 vsi->tx_linearize = 0; 6452 vsi->rx_buf_failed = 0; 6453 vsi->rx_page_failed = 0; 6454 6455 rcu_read_lock(); 6456 6457 /* update Tx rings counters */ 6458 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, 6459 vsi->num_txq); 6460 6461 /* update Rx rings counters */ 6462 ice_for_each_rxq(vsi, i) { 6463 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); 6464 struct ice_ring_stats *ring_stats; 6465 6466 ring_stats = ring->ring_stats; 6467 ice_fetch_u64_stats_per_ring(&ring_stats->syncp, 6468 ring_stats->stats, &pkts, 6469 &bytes); 6470 vsi_stats->rx_packets += pkts; 6471 vsi_stats->rx_bytes += bytes; 6472 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; 6473 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; 6474 } 6475 6476 /* update XDP Tx rings counters */ 6477 if (ice_is_xdp_ena_vsi(vsi)) 6478 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, 6479 vsi->num_xdp_txq); 6480 6481 rcu_read_unlock(); 6482 6483 net_stats = &vsi->net_stats; 6484 stats_prev = &vsi->net_stats_prev; 6485 6486 /* clear prev counters after reset */ 6487 if (vsi_stats->tx_packets < stats_prev->tx_packets || 6488 vsi_stats->rx_packets < stats_prev->rx_packets) { 6489 stats_prev->tx_packets = 0; 6490 stats_prev->tx_bytes = 0; 6491 stats_prev->rx_packets = 0; 6492 stats_prev->rx_bytes = 0; 6493 } 6494 6495 /* update netdev counters */ 6496 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; 6497 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; 6498 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; 6499 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; 6500 6501 
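/* Worked example (illustrative numbers): if the rings currently report
 * 1000 Tx packets while stats_prev holds 900, net_stats grows by 100 above,
 * and stats_prev is then refreshed to 1000 below so the next pass only adds
 * traffic seen since this one.
 */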
stats_prev->tx_packets = vsi_stats->tx_packets; 6502 stats_prev->tx_bytes = vsi_stats->tx_bytes; 6503 stats_prev->rx_packets = vsi_stats->rx_packets; 6504 stats_prev->rx_bytes = vsi_stats->rx_bytes; 6505 6506 kfree(vsi_stats); 6507 } 6508 6509 /** 6510 * ice_update_vsi_stats - Update VSI stats counters 6511 * @vsi: the VSI to be updated 6512 */ 6513 void ice_update_vsi_stats(struct ice_vsi *vsi) 6514 { 6515 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 6516 struct ice_eth_stats *cur_es = &vsi->eth_stats; 6517 struct ice_pf *pf = vsi->back; 6518 6519 if (test_bit(ICE_VSI_DOWN, vsi->state) || 6520 test_bit(ICE_CFG_BUSY, pf->state)) 6521 return; 6522 6523 /* get stats as recorded by Tx/Rx rings */ 6524 ice_update_vsi_ring_stats(vsi); 6525 6526 /* get VSI stats as recorded by the hardware */ 6527 ice_update_eth_stats(vsi); 6528 6529 cur_ns->tx_errors = cur_es->tx_errors; 6530 cur_ns->rx_dropped = cur_es->rx_discards; 6531 cur_ns->tx_dropped = cur_es->tx_discards; 6532 cur_ns->multicast = cur_es->rx_multicast; 6533 6534 /* update some more netdev stats if this is main VSI */ 6535 if (vsi->type == ICE_VSI_PF) { 6536 cur_ns->rx_crc_errors = pf->stats.crc_errors; 6537 cur_ns->rx_errors = pf->stats.crc_errors + 6538 pf->stats.illegal_bytes + 6539 pf->stats.rx_len_errors + 6540 pf->stats.rx_undersize + 6541 pf->hw_csum_rx_error + 6542 pf->stats.rx_jabber + 6543 pf->stats.rx_fragments + 6544 pf->stats.rx_oversize; 6545 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 6546 /* record drops from the port level */ 6547 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 6548 } 6549 } 6550 6551 /** 6552 * ice_update_pf_stats - Update PF port stats counters 6553 * @pf: PF whose stats needs to be updated 6554 */ 6555 void ice_update_pf_stats(struct ice_pf *pf) 6556 { 6557 struct ice_hw_port_stats *prev_ps, *cur_ps; 6558 struct ice_hw *hw = &pf->hw; 6559 u16 fd_ctr_base; 6560 u8 port; 6561 6562 port = hw->port_info->lport; 6563 prev_ps = &pf->stats_prev; 6564 cur_ps = &pf->stats; 6565 6566 if (ice_is_reset_in_progress(pf->state)) 6567 pf->stat_prev_loaded = false; 6568 6569 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 6570 &prev_ps->eth.rx_bytes, 6571 &cur_ps->eth.rx_bytes); 6572 6573 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 6574 &prev_ps->eth.rx_unicast, 6575 &cur_ps->eth.rx_unicast); 6576 6577 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 6578 &prev_ps->eth.rx_multicast, 6579 &cur_ps->eth.rx_multicast); 6580 6581 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 6582 &prev_ps->eth.rx_broadcast, 6583 &cur_ps->eth.rx_broadcast); 6584 6585 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 6586 &prev_ps->eth.rx_discards, 6587 &cur_ps->eth.rx_discards); 6588 6589 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 6590 &prev_ps->eth.tx_bytes, 6591 &cur_ps->eth.tx_bytes); 6592 6593 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 6594 &prev_ps->eth.tx_unicast, 6595 &cur_ps->eth.tx_unicast); 6596 6597 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 6598 &prev_ps->eth.tx_multicast, 6599 &cur_ps->eth.tx_multicast); 6600 6601 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 6602 &prev_ps->eth.tx_broadcast, 6603 &cur_ps->eth.tx_broadcast); 6604 6605 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 6606 &prev_ps->tx_dropped_link_down, 6607 &cur_ps->tx_dropped_link_down); 6608 6609 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 6610 
&prev_ps->rx_size_64, &cur_ps->rx_size_64); 6611 6612 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 6613 &prev_ps->rx_size_127, &cur_ps->rx_size_127); 6614 6615 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 6616 &prev_ps->rx_size_255, &cur_ps->rx_size_255); 6617 6618 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 6619 &prev_ps->rx_size_511, &cur_ps->rx_size_511); 6620 6621 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 6622 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 6623 6624 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 6625 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 6626 6627 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 6628 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 6629 6630 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 6631 &prev_ps->tx_size_64, &cur_ps->tx_size_64); 6632 6633 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 6634 &prev_ps->tx_size_127, &cur_ps->tx_size_127); 6635 6636 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 6637 &prev_ps->tx_size_255, &cur_ps->tx_size_255); 6638 6639 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 6640 &prev_ps->tx_size_511, &cur_ps->tx_size_511); 6641 6642 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 6643 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 6644 6645 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 6646 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 6647 6648 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 6649 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 6650 6651 fd_ctr_base = hw->fd_ctr_base; 6652 6653 ice_stat_update40(hw, 6654 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 6655 pf->stat_prev_loaded, &prev_ps->fd_sb_match, 6656 &cur_ps->fd_sb_match); 6657 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 6658 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 6659 6660 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 6661 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 6662 6663 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 6664 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 6665 6666 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 6667 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 6668 6669 ice_update_dcb_stats(pf); 6670 6671 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 6672 &prev_ps->crc_errors, &cur_ps->crc_errors); 6673 6674 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 6675 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 6676 6677 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 6678 &prev_ps->mac_local_faults, 6679 &cur_ps->mac_local_faults); 6680 6681 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 6682 &prev_ps->mac_remote_faults, 6683 &cur_ps->mac_remote_faults); 6684 6685 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 6686 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 6687 6688 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 6689 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 6690 6691 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 6692 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 6693 6694 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 6695 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 6696 6697 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 6698 
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
6699
6700 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6701
6702 pf->stat_prev_loaded = true;
6703 }
6704
6705 /**
6706 * ice_get_stats64 - get statistics for network device structure
6707 * @netdev: network interface device structure
6708 * @stats: main device statistics structure
6709 */
6710 static
6711 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6712 {
6713 struct ice_netdev_priv *np = netdev_priv(netdev);
6714 struct rtnl_link_stats64 *vsi_stats;
6715 struct ice_vsi *vsi = np->vsi;
6716
6717 vsi_stats = &vsi->net_stats;
6718
6719 if (!vsi->num_txq || !vsi->num_rxq)
6720 return;
6721
6722 /* netdev packet/byte stats come from the ring counters. These are
6723 * obtained by summing up ring counters (done by ice_update_vsi_ring_stats).
6724 * But only call the update routine and read the registers if VSI is
6725 * not down.
6726 */
6727 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6728 ice_update_vsi_ring_stats(vsi);
6729 stats->tx_packets = vsi_stats->tx_packets;
6730 stats->tx_bytes = vsi_stats->tx_bytes;
6731 stats->rx_packets = vsi_stats->rx_packets;
6732 stats->rx_bytes = vsi_stats->rx_bytes;
6733
6734 /* The rest of the stats can be read from the hardware but instead we
6735 * just return values that the watchdog task has already obtained from
6736 * the hardware.
6737 */
6738 stats->multicast = vsi_stats->multicast;
6739 stats->tx_errors = vsi_stats->tx_errors;
6740 stats->tx_dropped = vsi_stats->tx_dropped;
6741 stats->rx_errors = vsi_stats->rx_errors;
6742 stats->rx_dropped = vsi_stats->rx_dropped;
6743 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6744 stats->rx_length_errors = vsi_stats->rx_length_errors;
6745 }
6746
6747 /**
6748 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6749 * @vsi: VSI having NAPI disabled
6750 */
6751 static void ice_napi_disable_all(struct ice_vsi *vsi)
6752 {
6753 int q_idx;
6754
6755 if (!vsi->netdev)
6756 return;
6757
6758 ice_for_each_q_vector(vsi, q_idx) {
6759 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6760
6761 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6762 napi_disable(&q_vector->napi);
6763
6764 cancel_work_sync(&q_vector->tx.dim.work);
6765 cancel_work_sync(&q_vector->rx.dim.work);
6766 }
6767 }
6768
6769 /**
6770 * ice_down - Shutdown the connection
6771 * @vsi: The VSI being stopped
6772 *
6773 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
6774 */
6775 int ice_down(struct ice_vsi *vsi)
6776 {
6777 int i, tx_err, rx_err, vlan_err = 0;
6778
6779 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6780
6781 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6782 vlan_err = ice_vsi_del_vlan_zero(vsi);
6783 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6784 netif_carrier_off(vsi->netdev);
6785 netif_tx_disable(vsi->netdev);
6786 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6787 ice_eswitch_stop_all_tx_queues(vsi->back);
6788 }
6789
6790 ice_vsi_dis_irq(vsi);
6791
6792 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6793 if (tx_err)
6794 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6795 vsi->vsi_num, tx_err);
6796 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6797 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6798 if (tx_err)
6799 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6800 vsi->vsi_num, tx_err);
6801 }
6802
6803 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6804 if (rx_err)
6805 netdev_err(vsi->netdev, "Failed stop Rx rings,
VSI %d error %d\n", 6806 vsi->vsi_num, rx_err); 6807 6808 ice_napi_disable_all(vsi); 6809 6810 ice_for_each_txq(vsi, i) 6811 ice_clean_tx_ring(vsi->tx_rings[i]); 6812 6813 ice_for_each_rxq(vsi, i) 6814 ice_clean_rx_ring(vsi->rx_rings[i]); 6815 6816 if (tx_err || rx_err || vlan_err) { 6817 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 6818 vsi->vsi_num, vsi->vsw->sw_id); 6819 return -EIO; 6820 } 6821 6822 return 0; 6823 } 6824 6825 /** 6826 * ice_down_up - shutdown the VSI connection and bring it up 6827 * @vsi: the VSI to be reconnected 6828 */ 6829 int ice_down_up(struct ice_vsi *vsi) 6830 { 6831 int ret; 6832 6833 /* if DOWN already set, nothing to do */ 6834 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 6835 return 0; 6836 6837 ret = ice_down(vsi); 6838 if (ret) 6839 return ret; 6840 6841 ret = ice_up(vsi); 6842 if (ret) { 6843 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); 6844 return ret; 6845 } 6846 6847 return 0; 6848 } 6849 6850 /** 6851 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 6852 * @vsi: VSI having resources allocated 6853 * 6854 * Return 0 on success, negative on failure 6855 */ 6856 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 6857 { 6858 int i, err = 0; 6859 6860 if (!vsi->num_txq) { 6861 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 6862 vsi->vsi_num); 6863 return -EINVAL; 6864 } 6865 6866 ice_for_each_txq(vsi, i) { 6867 struct ice_tx_ring *ring = vsi->tx_rings[i]; 6868 6869 if (!ring) 6870 return -EINVAL; 6871 6872 if (vsi->netdev) 6873 ring->netdev = vsi->netdev; 6874 err = ice_setup_tx_ring(ring); 6875 if (err) 6876 break; 6877 } 6878 6879 return err; 6880 } 6881 6882 /** 6883 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 6884 * @vsi: VSI having resources allocated 6885 * 6886 * Return 0 on success, negative on failure 6887 */ 6888 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 6889 { 6890 int i, err = 0; 6891 6892 if (!vsi->num_rxq) { 6893 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 6894 vsi->vsi_num); 6895 return -EINVAL; 6896 } 6897 6898 ice_for_each_rxq(vsi, i) { 6899 struct ice_rx_ring *ring = vsi->rx_rings[i]; 6900 6901 if (!ring) 6902 return -EINVAL; 6903 6904 if (vsi->netdev) 6905 ring->netdev = vsi->netdev; 6906 err = ice_setup_rx_ring(ring); 6907 if (err) 6908 break; 6909 } 6910 6911 return err; 6912 } 6913 6914 /** 6915 * ice_vsi_open_ctrl - open control VSI for use 6916 * @vsi: the VSI to open 6917 * 6918 * Initialization of the Control VSI 6919 * 6920 * Returns 0 on success, negative value on error 6921 */ 6922 int ice_vsi_open_ctrl(struct ice_vsi *vsi) 6923 { 6924 char int_name[ICE_INT_NAME_STR_LEN]; 6925 struct ice_pf *pf = vsi->back; 6926 struct device *dev; 6927 int err; 6928 6929 dev = ice_pf_to_dev(pf); 6930 /* allocate descriptors */ 6931 err = ice_vsi_setup_tx_rings(vsi); 6932 if (err) 6933 goto err_setup_tx; 6934 6935 err = ice_vsi_setup_rx_rings(vsi); 6936 if (err) 6937 goto err_setup_rx; 6938 6939 err = ice_vsi_cfg(vsi); 6940 if (err) 6941 goto err_setup_rx; 6942 6943 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 6944 dev_driver_string(dev), dev_name(dev)); 6945 err = ice_vsi_req_irq_msix(vsi, int_name); 6946 if (err) 6947 goto err_setup_rx; 6948 6949 ice_vsi_cfg_msix(vsi); 6950 6951 err = ice_vsi_start_all_rx_rings(vsi); 6952 if (err) 6953 goto err_up_complete; 6954 6955 clear_bit(ICE_VSI_DOWN, vsi->state); 6956 ice_vsi_ena_irq(vsi); 6957 6958 return 0; 6959 6960 
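/* Error unwinding below runs in reverse order of the setup above, so each
 * label releases only what was successfully set up before the failing step.
 */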
err_up_complete: 6961 ice_down(vsi); 6962 err_setup_rx: 6963 ice_vsi_free_rx_rings(vsi); 6964 err_setup_tx: 6965 ice_vsi_free_tx_rings(vsi); 6966 6967 return err; 6968 } 6969 6970 /** 6971 * ice_vsi_open - Called when a network interface is made active 6972 * @vsi: the VSI to open 6973 * 6974 * Initialization of the VSI 6975 * 6976 * Returns 0 on success, negative value on error 6977 */ 6978 int ice_vsi_open(struct ice_vsi *vsi) 6979 { 6980 char int_name[ICE_INT_NAME_STR_LEN]; 6981 struct ice_pf *pf = vsi->back; 6982 int err; 6983 6984 /* allocate descriptors */ 6985 err = ice_vsi_setup_tx_rings(vsi); 6986 if (err) 6987 goto err_setup_tx; 6988 6989 err = ice_vsi_setup_rx_rings(vsi); 6990 if (err) 6991 goto err_setup_rx; 6992 6993 err = ice_vsi_cfg(vsi); 6994 if (err) 6995 goto err_setup_rx; 6996 6997 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 6998 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 6999 err = ice_vsi_req_irq_msix(vsi, int_name); 7000 if (err) 7001 goto err_setup_rx; 7002 7003 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 7004 7005 if (vsi->type == ICE_VSI_PF) { 7006 /* Notify the stack of the actual queue counts. */ 7007 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 7008 if (err) 7009 goto err_set_qs; 7010 7011 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 7012 if (err) 7013 goto err_set_qs; 7014 } 7015 7016 err = ice_up_complete(vsi); 7017 if (err) 7018 goto err_up_complete; 7019 7020 return 0; 7021 7022 err_up_complete: 7023 ice_down(vsi); 7024 err_set_qs: 7025 ice_vsi_free_irq(vsi); 7026 err_setup_rx: 7027 ice_vsi_free_rx_rings(vsi); 7028 err_setup_tx: 7029 ice_vsi_free_tx_rings(vsi); 7030 7031 return err; 7032 } 7033 7034 /** 7035 * ice_vsi_release_all - Delete all VSIs 7036 * @pf: PF from which all VSIs are being removed 7037 */ 7038 static void ice_vsi_release_all(struct ice_pf *pf) 7039 { 7040 int err, i; 7041 7042 if (!pf->vsi) 7043 return; 7044 7045 ice_for_each_vsi(pf, i) { 7046 if (!pf->vsi[i]) 7047 continue; 7048 7049 if (pf->vsi[i]->type == ICE_VSI_CHNL) 7050 continue; 7051 7052 err = ice_vsi_release(pf->vsi[i]); 7053 if (err) 7054 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 7055 i, err, pf->vsi[i]->vsi_num); 7056 } 7057 } 7058 7059 /** 7060 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 7061 * @pf: pointer to the PF instance 7062 * @type: VSI type to rebuild 7063 * 7064 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 7065 */ 7066 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 7067 { 7068 struct device *dev = ice_pf_to_dev(pf); 7069 int i, err; 7070 7071 ice_for_each_vsi(pf, i) { 7072 struct ice_vsi *vsi = pf->vsi[i]; 7073 7074 if (!vsi || vsi->type != type) 7075 continue; 7076 7077 /* rebuild the VSI */ 7078 err = ice_vsi_rebuild(vsi, true); 7079 if (err) { 7080 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", 7081 err, vsi->idx, ice_vsi_type_str(type)); 7082 return err; 7083 } 7084 7085 /* replay filters for the VSI */ 7086 err = ice_replay_vsi(&pf->hw, vsi->idx); 7087 if (err) { 7088 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", 7089 err, vsi->idx, ice_vsi_type_str(type)); 7090 return err; 7091 } 7092 7093 /* Re-map HW VSI number, using VSI handle that has been 7094 * previously validated in ice_replay_vsi() call above 7095 */ 7096 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 7097 7098 /* enable the VSI */ 7099 err = ice_ena_vsi(vsi, false); 7100 if 
(err) {
7101 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7102 err, vsi->idx, ice_vsi_type_str(type));
7103 return err;
7104 }
7105
7106 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7107 ice_vsi_type_str(type));
7108 }
7109
7110 return 0;
7111 }
7112
7113 /**
7114 * ice_update_pf_netdev_link - Update PF netdev link status
7115 * @pf: pointer to the PF instance
7116 */
7117 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7118 {
7119 bool link_up;
7120 int i;
7121
7122 ice_for_each_vsi(pf, i) {
7123 struct ice_vsi *vsi = pf->vsi[i];
7124
7125 if (!vsi || vsi->type != ICE_VSI_PF)
7126 return;
7127
7128 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7129 if (link_up) {
7130 netif_carrier_on(pf->vsi[i]->netdev);
7131 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7132 } else {
7133 netif_carrier_off(pf->vsi[i]->netdev);
7134 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7135 }
7136 }
7137 }
7138
7139 /**
7140 * ice_rebuild - rebuild after reset
7141 * @pf: PF to rebuild
7142 * @reset_type: type of reset
7143 *
7144 * Do not rebuild VF VSIs in this flow because that is already handled via
7145 * ice_reset_all_vfs(). The requirements for resetting a VF after a
7146 * PFR/CORER/GLOBR/etc. are different from the normal flow, and we don't want
7147 * to reset/rebuild the VF VSIs twice.
7148 */
7149 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7150 {
7151 struct device *dev = ice_pf_to_dev(pf);
7152 struct ice_hw *hw = &pf->hw;
7153 bool dvm;
7154 int err;
7155
7156 if (test_bit(ICE_DOWN, pf->state))
7157 goto clear_recovery;
7158
7159 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7160
7161 #define ICE_EMP_RESET_SLEEP_MS 5000
7162 if (reset_type == ICE_RESET_EMPR) {
7163 /* If an EMP reset has occurred, any previously pending flash
7164 * update will have completed. We no longer know whether or
7165 * not the NVM update EMP reset is restricted.
7166 */ 7167 pf->fw_emp_reset_disabled = false; 7168 7169 msleep(ICE_EMP_RESET_SLEEP_MS); 7170 } 7171 7172 err = ice_init_all_ctrlq(hw); 7173 if (err) { 7174 dev_err(dev, "control queues init failed %d\n", err); 7175 goto err_init_ctrlq; 7176 } 7177 7178 /* if DDP was previously loaded successfully */ 7179 if (!ice_is_safe_mode(pf)) { 7180 /* reload the SW DB of filter tables */ 7181 if (reset_type == ICE_RESET_PFR) 7182 ice_fill_blk_tbls(hw); 7183 else 7184 /* Reload DDP Package after CORER/GLOBR reset */ 7185 ice_load_pkg(NULL, pf); 7186 } 7187 7188 err = ice_clear_pf_cfg(hw); 7189 if (err) { 7190 dev_err(dev, "clear PF configuration failed %d\n", err); 7191 goto err_init_ctrlq; 7192 } 7193 7194 ice_clear_pxe_mode(hw); 7195 7196 err = ice_init_nvm(hw); 7197 if (err) { 7198 dev_err(dev, "ice_init_nvm failed %d\n", err); 7199 goto err_init_ctrlq; 7200 } 7201 7202 err = ice_get_caps(hw); 7203 if (err) { 7204 dev_err(dev, "ice_get_caps failed %d\n", err); 7205 goto err_init_ctrlq; 7206 } 7207 7208 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 7209 if (err) { 7210 dev_err(dev, "set_mac_cfg failed %d\n", err); 7211 goto err_init_ctrlq; 7212 } 7213 7214 dvm = ice_is_dvm_ena(hw); 7215 7216 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 7217 if (err) 7218 goto err_init_ctrlq; 7219 7220 err = ice_sched_init_port(hw->port_info); 7221 if (err) 7222 goto err_sched_init_port; 7223 7224 /* start misc vector */ 7225 err = ice_req_irq_msix_misc(pf); 7226 if (err) { 7227 dev_err(dev, "misc vector setup failed: %d\n", err); 7228 goto err_sched_init_port; 7229 } 7230 7231 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 7232 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 7233 if (!rd32(hw, PFQF_FD_SIZE)) { 7234 u16 unused, guar, b_effort; 7235 7236 guar = hw->func_caps.fd_fltr_guar; 7237 b_effort = hw->func_caps.fd_fltr_best_effort; 7238 7239 /* force guaranteed filter pool for PF */ 7240 ice_alloc_fd_guar_item(hw, &unused, guar); 7241 /* force shared filter pool for PF */ 7242 ice_alloc_fd_shrd_item(hw, &unused, b_effort); 7243 } 7244 } 7245 7246 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 7247 ice_dcb_rebuild(pf); 7248 7249 /* If the PF previously had enabled PTP, PTP init needs to happen before 7250 * the VSI rebuild. If not, this causes the PTP link status events to 7251 * fail. 
7252 */ 7253 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 7254 ice_ptp_reset(pf); 7255 7256 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 7257 ice_gnss_init(pf); 7258 7259 /* rebuild PF VSI */ 7260 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 7261 if (err) { 7262 dev_err(dev, "PF VSI rebuild failed: %d\n", err); 7263 goto err_vsi_rebuild; 7264 } 7265 7266 /* configure PTP timestamping after VSI rebuild */ 7267 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 7268 ice_ptp_cfg_timestamp(pf, false); 7269 7270 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); 7271 if (err) { 7272 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); 7273 goto err_vsi_rebuild; 7274 } 7275 7276 if (reset_type == ICE_RESET_PFR) { 7277 err = ice_rebuild_channels(pf); 7278 if (err) { 7279 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", 7280 err); 7281 goto err_vsi_rebuild; 7282 } 7283 } 7284 7285 /* If Flow Director is active */ 7286 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 7287 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 7288 if (err) { 7289 dev_err(dev, "control VSI rebuild failed: %d\n", err); 7290 goto err_vsi_rebuild; 7291 } 7292 7293 /* replay HW Flow Director recipes */ 7294 if (hw->fdir_prof) 7295 ice_fdir_replay_flows(hw); 7296 7297 /* replay Flow Director filters */ 7298 ice_fdir_replay_fltrs(pf); 7299 7300 ice_rebuild_arfs(pf); 7301 } 7302 7303 ice_update_pf_netdev_link(pf); 7304 7305 /* tell the firmware we are up */ 7306 err = ice_send_version(pf); 7307 if (err) { 7308 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", 7309 err); 7310 goto err_vsi_rebuild; 7311 } 7312 7313 ice_replay_post(hw); 7314 7315 /* if we get here, reset flow is successful */ 7316 clear_bit(ICE_RESET_FAILED, pf->state); 7317 7318 ice_plug_aux_dev(pf); 7319 return; 7320 7321 err_vsi_rebuild: 7322 err_sched_init_port: 7323 ice_sched_cleanup_all(hw); 7324 err_init_ctrlq: 7325 ice_shutdown_all_ctrlq(hw); 7326 set_bit(ICE_RESET_FAILED, pf->state); 7327 clear_recovery: 7328 /* set this bit in PF state to control service task scheduling */ 7329 set_bit(ICE_NEEDS_RESTART, pf->state); 7330 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 7331 } 7332 7333 /** 7334 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP 7335 * @vsi: Pointer to VSI structure 7336 */ 7337 static int ice_max_xdp_frame_size(struct ice_vsi *vsi) 7338 { 7339 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) 7340 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; 7341 else 7342 return ICE_RXBUF_3072; 7343 } 7344 7345 /** 7346 * ice_change_mtu - NDO callback to change the MTU 7347 * @netdev: network interface device structure 7348 * @new_mtu: new value for maximum frame size 7349 * 7350 * Returns 0 on success, negative on failure 7351 */ 7352 static int ice_change_mtu(struct net_device *netdev, int new_mtu) 7353 { 7354 struct ice_netdev_priv *np = netdev_priv(netdev); 7355 struct ice_vsi *vsi = np->vsi; 7356 struct ice_pf *pf = vsi->back; 7357 u8 count = 0; 7358 int err = 0; 7359 7360 if (new_mtu == (int)netdev->mtu) { 7361 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 7362 return 0; 7363 } 7364 7365 if (ice_is_xdp_ena_vsi(vsi)) { 7366 int frame_size = ice_max_xdp_frame_size(vsi); 7367 7368 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 7369 netdev_err(netdev, "max MTU for XDP usage is %d\n", 7370 frame_size - ICE_ETH_PKT_HDR_PAD); 7371 return -EINVAL; 7372 } 7373 } 7374 7375 /* if a reset is in progress, wait for some time for it to 
complete */
7376 do {
7377 if (ice_is_reset_in_progress(pf->state)) {
7378 count++;
7379 usleep_range(1000, 2000);
7380 } else {
7381 break;
7382 }
7383
7384 } while (count < 100);
7385
7386 if (count == 100) {
7387 netdev_err(netdev, "can't change MTU. Device is busy\n");
7388 return -EBUSY;
7389 }
7390
7391 netdev->mtu = (unsigned int)new_mtu;
7392
7393 /* if VSI is up, bring it down and then back up */
7394 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
7395 err = ice_down(vsi);
7396 if (err) {
7397 netdev_err(netdev, "change MTU if_down err %d\n", err);
7398 return err;
7399 }
7400
7401 err = ice_up(vsi);
7402 if (err) {
7403 netdev_err(netdev, "change MTU if_up err %d\n", err);
7404 return err;
7405 }
7406 }
7407
7408 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7409 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7410
7411 return err;
7412 }
7413
7414 /**
7415 * ice_eth_ioctl - Access the hwtstamp interface
7416 * @netdev: network interface device structure
7417 * @ifr: interface request data
7418 * @cmd: ioctl command
7419 */
7420 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7421 {
7422 struct ice_netdev_priv *np = netdev_priv(netdev);
7423 struct ice_pf *pf = np->vsi->back;
7424
7425 switch (cmd) {
7426 case SIOCGHWTSTAMP:
7427 return ice_ptp_get_ts_config(pf, ifr);
7428 case SIOCSHWTSTAMP:
7429 return ice_ptp_set_ts_config(pf, ifr);
7430 default:
7431 return -EOPNOTSUPP;
7432 }
7433 }
7434
7435 /**
7436 * ice_aq_str - convert AQ err code to a string
7437 * @aq_err: the AQ error code to convert
7438 */
7439 const char *ice_aq_str(enum ice_aq_err aq_err)
7440 {
7441 switch (aq_err) {
7442 case ICE_AQ_RC_OK:
7443 return "OK";
7444 case ICE_AQ_RC_EPERM:
7445 return "ICE_AQ_RC_EPERM";
7446 case ICE_AQ_RC_ENOENT:
7447 return "ICE_AQ_RC_ENOENT";
7448 case ICE_AQ_RC_ENOMEM:
7449 return "ICE_AQ_RC_ENOMEM";
7450 case ICE_AQ_RC_EBUSY:
7451 return "ICE_AQ_RC_EBUSY";
7452 case ICE_AQ_RC_EEXIST:
7453 return "ICE_AQ_RC_EEXIST";
7454 case ICE_AQ_RC_EINVAL:
7455 return "ICE_AQ_RC_EINVAL";
7456 case ICE_AQ_RC_ENOSPC:
7457 return "ICE_AQ_RC_ENOSPC";
7458 case ICE_AQ_RC_ENOSYS:
7459 return "ICE_AQ_RC_ENOSYS";
7460 case ICE_AQ_RC_EMODE:
7461 return "ICE_AQ_RC_EMODE";
7462 case ICE_AQ_RC_ENOSEC:
7463 return "ICE_AQ_RC_ENOSEC";
7464 case ICE_AQ_RC_EBADSIG:
7465 return "ICE_AQ_RC_EBADSIG";
7466 case ICE_AQ_RC_ESVN:
7467 return "ICE_AQ_RC_ESVN";
7468 case ICE_AQ_RC_EBADMAN:
7469 return "ICE_AQ_RC_EBADMAN";
7470 case ICE_AQ_RC_EBADBUF:
7471 return "ICE_AQ_RC_EBADBUF";
7472 }
7473
7474 return "ICE_AQ_RC_UNKNOWN";
7475 }
7476
7477 /**
7478 * ice_set_rss_lut - Set RSS LUT
7479 * @vsi: Pointer to VSI structure
7480 * @lut: Lookup table
7481 * @lut_size: Lookup table size
7482 *
7483 * Returns 0 on success, negative on failure
7484 */
7485 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7486 {
7487 struct ice_aq_get_set_rss_lut_params params = {};
7488 struct ice_hw *hw = &vsi->back->hw;
7489 int status;
7490
7491 if (!lut)
7492 return -EINVAL;
7493
7494 params.vsi_handle = vsi->idx;
7495 params.lut_size = lut_size;
7496 params.lut_type = vsi->rss_lut_type;
7497 params.lut = lut;
7498
7499 status = ice_aq_set_rss_lut(hw, &params);
7500 if (status)
7501 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7502 status, ice_aq_str(hw->adminq.sq_last_status));
7503
7504 return status;
7505 }
7506
7507 /**
7508 * ice_set_rss_key - Set RSS key
7509 * @vsi: Pointer to the VSI structure
7510 * @seed: RSS hash seed
7511 *
7512 * Returns 0 on
success, negative on failure
7513 */
7514 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7515 {
7516 struct ice_hw *hw = &vsi->back->hw;
7517 int status;
7518
7519 if (!seed)
7520 return -EINVAL;
7521
7522 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7523 if (status)
7524 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7525 status, ice_aq_str(hw->adminq.sq_last_status));
7526
7527 return status;
7528 }
7529
7530 /**
7531 * ice_get_rss_lut - Get RSS LUT
7532 * @vsi: Pointer to VSI structure
7533 * @lut: Buffer to store the lookup table entries
7534 * @lut_size: Size of buffer to store the lookup table entries
7535 *
7536 * Returns 0 on success, negative on failure
7537 */
7538 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7539 {
7540 struct ice_aq_get_set_rss_lut_params params = {};
7541 struct ice_hw *hw = &vsi->back->hw;
7542 int status;
7543
7544 if (!lut)
7545 return -EINVAL;
7546
7547 params.vsi_handle = vsi->idx;
7548 params.lut_size = lut_size;
7549 params.lut_type = vsi->rss_lut_type;
7550 params.lut = lut;
7551
7552 status = ice_aq_get_rss_lut(hw, &params);
7553 if (status)
7554 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7555 status, ice_aq_str(hw->adminq.sq_last_status));
7556
7557 return status;
7558 }
7559
7560 /**
7561 * ice_get_rss_key - Get RSS key
7562 * @vsi: Pointer to VSI structure
7563 * @seed: Buffer to store the key in
7564 *
7565 * Returns 0 on success, negative on failure
7566 */
7567 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7568 {
7569 struct ice_hw *hw = &vsi->back->hw;
7570 int status;
7571
7572 if (!seed)
7573 return -EINVAL;
7574
7575 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7576 if (status)
7577 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7578 status, ice_aq_str(hw->adminq.sq_last_status));
7579
7580 return status;
7581 }
7582
7583 /**
7584 * ice_bridge_getlink - Get the hardware bridge mode
7585 * @skb: skb buff
7586 * @pid: process ID
7587 * @seq: RTNL message seq
7588 * @dev: the netdev being configured
7589 * @filter_mask: filter mask passed in
7590 * @nlflags: netlink flags passed in
7591 *
7592 * Return the bridge mode (VEB/VEPA)
7593 */
7594 static int
7595 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7596 struct net_device *dev, u32 filter_mask, int nlflags)
7597 {
7598 struct ice_netdev_priv *np = netdev_priv(dev);
7599 struct ice_vsi *vsi = np->vsi;
7600 struct ice_pf *pf = vsi->back;
7601 u16 bmode;
7602
7603 bmode = pf->first_sw->bridge_mode;
7604
7605 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7606 filter_mask, NULL);
7607 }
7608
7609 /**
7610 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7611 * @vsi: Pointer to VSI structure
7612 * @bmode: Hardware bridge mode (VEB/VEPA)
7613 *
7614 * Returns 0 on success, negative on failure
7615 */
7616 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7617 {
7618 struct ice_aqc_vsi_props *vsi_props;
7619 struct ice_hw *hw = &vsi->back->hw;
7620 struct ice_vsi_ctx *ctxt;
7621 int ret;
7622
7623 vsi_props = &vsi->info;
7624
7625 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7626 if (!ctxt)
7627 return -ENOMEM;
7628
7629 ctxt->info = vsi->info;
7630
7631 if (bmode == BRIDGE_MODE_VEB)
7632 /* change from VEPA to VEB mode */
7633 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7634 else
7635 /* change from VEB to VEPA
/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */
static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
		   struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 bmode;

	bmode = pf->first_sw->bridge_mode;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
				       filter_mask, NULL);
}

/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	int ret;

	vsi_props = &vsi->info;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (ret) {
		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
		goto out;
	}
	/* Update sw flags for bookkeeping */
	vsi_props->sw_flags = ctxt->info.sw_flags;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
 * not already set) for all VSIs connected to this switch, and also updates the
 * unicast switch filter rules for the corresponding switch of the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags,
		   struct netlink_ext_ack __always_unused *extack)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of the VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		err = ice_update_sw_rule_bridge_mode(hw);
		if (err) {
			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
				   mode, err,
				   ice_aq_str(hw->adminq.sq_last_status));
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return err;
		}

		pf_sw->bridge_mode = mode;
	}

	return 0;
}
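/* Usage sketch (illustrative): the VEB/VEPA switch mode handled by
 * ice_bridge_getlink()/ice_bridge_setlink() above is normally driven
 * from iproute2, e.g.:
 *
 *	# switch the uplink to VEPA (reflective relay) mode
 *	bridge link set dev eth0 hwmode vepa
 *	# inspect the mode currently reported by the driver
 *	bridge -d link show dev eth0
 *
 * "eth0" is an assumed interface name.
 */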
/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue
 */
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_tx_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* Check if PFC is enabled for the TC to which the queue belongs.
	 * If so, the Tx timeout is not caused by a hung queue and there is
	 * no need to reset and rebuild
	 */
	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
			 txqueue);
		return;
	}

	/* now that we have an index, find the tx_ring struct */
	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(ICE_DOWN, pf->state);
		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
		set_bit(ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * ice_setup_tc_cls_flower - flower classifier offloads
 * @np: net device to configure
 * @filter_dev: device on which filter is added
 * @cls_flower: offload data
 */
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
			struct net_device *filter_dev,
			struct flow_cls_offload *cls_flower)
{
	struct ice_vsi *vsi = np->vsi;

	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(vsi, cls_flower);
	default:
		return -EINVAL;
	}
}

/**
 * ice_setup_tc_block_cb - callback handler registered for TC block
 * @type: TC SETUP type
 * @type_data: TC flower offload data that contains user input
 * @cb_priv: netdev private data
 */
static int
ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct ice_netdev_priv *np = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}
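/* Usage sketch (illustrative): TC flower rules arrive at
 * ice_setup_tc_cls_flower() through the block callback above. With ADQ
 * channels configured, a filter can steer traffic to a hardware traffic
 * class, e.g.:
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 protocol ip ingress flower \
 *		ip_proto tcp dst_port 80 skip_sw hw_tc 1
 *
 * "eth0" and the match fields are assumptions for the example.
 */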
/**
 * ice_validate_mqprio_qopt - Validate TCF input parameters
 * @vsi: Pointer to VSI
 * @mqprio_qopt: input parameters for mqprio queue configuration
 *
 * This function validates the MQPRIO params, such as qcount (which must be a
 * power of 2 wherever needed), and makes sure the user doesn't specify a
 * qcount or BW rate limit for more TCs than "num_tc".
 */
static int
ice_validate_mqprio_qopt(struct ice_vsi *vsi,
			 struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0, sum_min_rate = 0;
	int non_power_of_2_qcount = 0;
	struct ice_pf *pf = vsi->back;
	int max_rss_q_cnt = 0;
	struct device *dev;
	int i, speed;
	u8 num_tc;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	vsi->ch_rss_size = 0;
	num_tc = mqprio_qopt->qopt.num_tc;

	for (i = 0; num_tc; i++) {
		int qcount = mqprio_qopt->qopt.count[i];
		u64 max_rate, min_rate, rem;

		if (!qcount)
			return -EINVAL;

		if (is_power_of_2(qcount)) {
			if (non_power_of_2_qcount &&
			    qcount > non_power_of_2_qcount) {
				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount > max_rss_q_cnt)
				max_rss_q_cnt = qcount;
		} else {
			if (non_power_of_2_qcount &&
			    qcount != non_power_of_2_qcount) {
				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount < max_rss_q_cnt) {
				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
					qcount, max_rss_q_cnt);
				return -EINVAL;
			}
			max_rss_q_cnt = qcount;
			non_power_of_2_qcount = qcount;
		}

		/* The TC command takes input in K/M/Gbps or K/M/Gbit etc. but
		 * converts the bandwidth rate limit into Bytes/s when
		 * passing it down to the driver. So convert the input
		 * bandwidth from Bytes/s to Kbps.
		 */
		max_rate = mqprio_qopt->max_rate[i];
		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
		sum_max_rate += max_rate;

		/* min_rate is minimum guaranteed rate and it can't be zero */
		min_rate = mqprio_qopt->min_rate[i];
		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
		sum_min_rate += min_rate;

		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
				min_rate, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		/* min_rate can't be more than max_rate, except when max_rate
		 * is zero (which implies that the max rate sought is the max
		 * line rate). In such a case min_rate can be more than max.
		 */
		if (max_rate && min_rate > max_rate) {
			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
				min_rate, max_rate);
			return -EINVAL;
		}

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + qcount))
			return -EINVAL;
	}
	if (vsi->num_rxq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;
	if (vsi->num_txq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;

	speed = ice_get_link_speed_kbps(vsi);
	if (sum_max_rate && sum_max_rate > (u64)speed) {
		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
			sum_max_rate, speed);
		return -EINVAL;
	}
	if (sum_min_rate && sum_min_rate > (u64)speed) {
		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
			sum_min_rate, speed);
		return -EINVAL;
	}

	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
	vsi->ch_rss_size = max_rss_q_cnt;

	return 0;
}
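/* Usage sketch (illustrative): the mqprio parameters validated above
 * originate from a channel-mode qdisc request such as:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 \
 *		mode channel shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * i.e. two ADQ traffic classes of 4 queues each, with per-TC max rates.
 * "eth0", the priority map and the rates are assumptions for the
 * example.
 */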
/**
 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
 * @pf: ptr to PF device
 * @vsi: ptr to VSI
 */
static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(pf);
	bool added = false;
	struct ice_hw *hw;
	int flow;

	if (!(vsi->num_gfltr || vsi->num_bfltr))
		return -EINVAL;

	hw = &pf->hw;
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof;
		int tun, status;
		u64 entry_h;

		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
		      hw->fdir_prof[flow]->cnt))
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			enum ice_flow_priority prio;
			u64 prof_id;

			/* add this VSI to FDir profile for this flow */
			prio = ICE_FLOW_PRIO_NORMAL;
			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
						    prof->vsi_h[0], vsi->idx,
						    prio, prof->fdir_seg[tun],
						    &entry_h);
			if (status) {
				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
					vsi->idx, flow);
				continue;
			}

			prof->entry_h[prof->cnt][tun] = entry_h;
		}

		/* store VSI for filter replay and delete */
		prof->vsi_h[prof->cnt] = vsi->idx;
		prof->cnt++;

		added = true;
		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
			flow);
	}

	if (!added)
		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);

	return 0;
}
/**
 * ice_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @sw_id: underlying HW switching element ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 */
static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi;

	if (ch->type != ICE_VSI_CHNL) {
		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
	if (!vsi || vsi->type != ICE_VSI_CHNL) {
		dev_err(dev, "create chnl VSI failure\n");
		return -EINVAL;
	}

	ice_add_vsi_to_fdir(pf, vsi);

	ch->sw_id = sw_id;
	ch->vsi_num = vsi->vsi_num;
	ch->info.mapping_flags = vsi->info.mapping_flags;
	ch->ch_vsi = vsi;
	/* set the back pointer of channel for newly created VSI */
	vsi->ch = ch;

	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));

	return 0;
}

/**
 * ice_chnl_cfg_res - configure channel rings and vectors
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure channel specific resources such as rings, vector.
 */
static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	int i;

	for (i = 0; i < ch->num_txq; i++) {
		struct ice_q_vector *tx_q_vector, *rx_q_vector;
		struct ice_ring_container *rc;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;

		tx_ring = vsi->tx_rings[ch->base_q + i];
		rx_ring = vsi->rx_rings[ch->base_q + i];
		if (!tx_ring || !rx_ring)
			continue;

		/* setup ring being channel enabled */
		tx_ring->ch = ch;
		rx_ring->ch = ch;

		/* following code block sets up vector specific attributes */
		tx_q_vector = tx_ring->q_vector;
		rx_q_vector = rx_ring->q_vector;
		if (!tx_q_vector && !rx_q_vector)
			continue;

		if (tx_q_vector) {
			tx_q_vector->ch = ch;
			/* setup Tx and Rx ITR setting if DIM is off */
			rc = &tx_q_vector->tx;
			if (!ITR_IS_DYNAMIC(rc))
				ice_write_itr(rc, rc->itr_setting);
		}
		if (rx_q_vector) {
			rx_q_vector->ch = ch;
			/* setup Tx and Rx ITR setting if DIM is off */
			rc = &rx_q_vector->rx;
			if (!ITR_IS_DYNAMIC(rc))
				ice_write_itr(rc, rc->itr_setting);
		}
	}

	/* it is safe to assume that, if the channel has non-zero num_t[r]xq,
	 * then the GLINT_ITR register would have been written to perform an
	 * in-context update, hence perform flush
	 */
	if (ch->num_txq || ch->num_rxq)
		ice_flush(&vsi->back->hw);
}

/**
 * ice_cfg_chnl_all_res - configure channel resources
 * @vsi: ptr to main_vsi
 * @ch: ptr to channel structure
 *
 * This function configures channel specific resources such as flow-director
 * counter index, and other resources such as queues, vectors, ITR settings
 */
static void
ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	/* configure channel (aka ADQ) resources such as queues, vectors,
	 * ITR settings for channel specific vectors and anything else
	 */
	ice_chnl_cfg_res(vsi, ch);
}

/**
 * ice_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @sw_id: underlying HW switching element ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures Tx rings accordingly
 */
static int
ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_channel *ch, u16 sw_id, u8 type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret;

	ch->base_q = vsi->next_base_q;
	ch->type = type;

	ret = ice_add_channel(pf, sw_id, ch);
	if (ret) {
		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
		return ret;
	}

	/* configure/setup ADQ specific resources */
8194 ice_cfg_chnl_all_res(vsi, ch); 8195 8196 /* make sure to update the next_base_q so that subsequent channel's 8197 * (aka ADQ) VSI queue map is correct 8198 */ 8199 vsi->next_base_q = vsi->next_base_q + ch->num_rxq; 8200 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num, 8201 ch->num_rxq); 8202 8203 return 0; 8204 } 8205 8206 /** 8207 * ice_setup_channel - setup new channel using uplink element 8208 * @pf: ptr to PF device 8209 * @vsi: the VSI being setup 8210 * @ch: ptr to channel structure 8211 * 8212 * Setup new channel (VSI) based on specified type (VMDq2/VF) 8213 * and uplink switching element 8214 */ 8215 static bool 8216 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, 8217 struct ice_channel *ch) 8218 { 8219 struct device *dev = ice_pf_to_dev(pf); 8220 u16 sw_id; 8221 int ret; 8222 8223 if (vsi->type != ICE_VSI_PF) { 8224 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); 8225 return false; 8226 } 8227 8228 sw_id = pf->first_sw->sw_id; 8229 8230 /* create channel (VSI) */ 8231 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); 8232 if (ret) { 8233 dev_err(dev, "failed to setup hw_channel\n"); 8234 return false; 8235 } 8236 dev_dbg(dev, "successfully created channel()\n"); 8237 8238 return ch->ch_vsi ? true : false; 8239 } 8240 8241 /** 8242 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate 8243 * @vsi: VSI to be configured 8244 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit 8245 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit 8246 */ 8247 static int 8248 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) 8249 { 8250 int err; 8251 8252 err = ice_set_min_bw_limit(vsi, min_tx_rate); 8253 if (err) 8254 return err; 8255 8256 return ice_set_max_bw_limit(vsi, max_tx_rate); 8257 } 8258 8259 /** 8260 * ice_create_q_channel - function to create channel 8261 * @vsi: VSI to be configured 8262 * @ch: ptr to channel (it contains channel specific params) 8263 * 8264 * This function creates channel (VSI) using num_queues specified by user, 8265 * reconfigs RSS if needed. 
8266 */ 8267 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) 8268 { 8269 struct ice_pf *pf = vsi->back; 8270 struct device *dev; 8271 8272 if (!ch) 8273 return -EINVAL; 8274 8275 dev = ice_pf_to_dev(pf); 8276 if (!ch->num_txq || !ch->num_rxq) { 8277 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq); 8278 return -EINVAL; 8279 } 8280 8281 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { 8282 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n", 8283 vsi->cnt_q_avail, ch->num_txq); 8284 return -EINVAL; 8285 } 8286 8287 if (!ice_setup_channel(pf, vsi, ch)) { 8288 dev_info(dev, "Failed to setup channel\n"); 8289 return -EINVAL; 8290 } 8291 /* configure BW rate limit */ 8292 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) { 8293 int ret; 8294 8295 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate, 8296 ch->min_tx_rate); 8297 if (ret) 8298 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n", 8299 ch->max_tx_rate, ch->ch_vsi->vsi_num); 8300 else 8301 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n", 8302 ch->max_tx_rate, ch->ch_vsi->vsi_num); 8303 } 8304 8305 vsi->cnt_q_avail -= ch->num_txq; 8306 8307 return 0; 8308 } 8309 8310 /** 8311 * ice_rem_all_chnl_fltrs - removes all channel filters 8312 * @pf: ptr to PF, TC-flower based filter are tracked at PF level 8313 * 8314 * Remove all advanced switch filters only if they are channel specific 8315 * tc-flower based filter 8316 */ 8317 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) 8318 { 8319 struct ice_tc_flower_fltr *fltr; 8320 struct hlist_node *node; 8321 8322 /* to remove all channel filters, iterate an ordered list of filters */ 8323 hlist_for_each_entry_safe(fltr, node, 8324 &pf->tc_flower_fltr_list, 8325 tc_flower_node) { 8326 struct ice_rule_query_data rule; 8327 int status; 8328 8329 /* for now process only channel specific filters */ 8330 if (!ice_is_chnl_fltr(fltr)) 8331 continue; 8332 8333 rule.rid = fltr->rid; 8334 rule.rule_id = fltr->rule_id; 8335 rule.vsi_handle = fltr->dest_vsi_handle; 8336 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); 8337 if (status) { 8338 if (status == -ENOENT) 8339 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", 8340 rule.rule_id); 8341 else 8342 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", 8343 status); 8344 } else if (fltr->dest_vsi) { 8345 /* update advanced switch filter count */ 8346 if (fltr->dest_vsi->type == ICE_VSI_CHNL) { 8347 u32 flags = fltr->flags; 8348 8349 fltr->dest_vsi->num_chnl_fltr--; 8350 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | 8351 ICE_TC_FLWR_FIELD_ENC_DST_MAC)) 8352 pf->num_dmac_chnl_fltrs--; 8353 } 8354 } 8355 8356 hlist_del(&fltr->tc_flower_node); 8357 kfree(fltr); 8358 } 8359 } 8360 8361 /** 8362 * ice_remove_q_channels - Remove queue channels for the TCs 8363 * @vsi: VSI to be configured 8364 * @rem_fltr: delete advanced switch filter or not 8365 * 8366 * Remove queue channels for the TCs 8367 */ 8368 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) 8369 { 8370 struct ice_channel *ch, *ch_tmp; 8371 struct ice_pf *pf = vsi->back; 8372 int i; 8373 8374 /* remove all tc-flower based filter if they are channel filters only */ 8375 if (rem_fltr) 8376 ice_rem_all_chnl_fltrs(pf); 8377 8378 /* remove ntuple filters since queue configuration is being changed */ 8379 if (vsi->netdev->features & NETIF_F_NTUPLE) { 8380 struct ice_hw *hw = &pf->hw; 8381 8382 mutex_lock(&hw->fdir_fltr_lock); 8383 
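		/* flush every Flow Director filter under fdir_fltr_lock;
		 * the rules reference queue indices that the channel
		 * teardown below is about to invalidate
		 */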
ice_fdir_del_all_fltrs(vsi); 8384 mutex_unlock(&hw->fdir_fltr_lock); 8385 } 8386 8387 /* perform cleanup for channels if they exist */ 8388 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { 8389 struct ice_vsi *ch_vsi; 8390 8391 list_del(&ch->list); 8392 ch_vsi = ch->ch_vsi; 8393 if (!ch_vsi) { 8394 kfree(ch); 8395 continue; 8396 } 8397 8398 /* Reset queue contexts */ 8399 for (i = 0; i < ch->num_rxq; i++) { 8400 struct ice_tx_ring *tx_ring; 8401 struct ice_rx_ring *rx_ring; 8402 8403 tx_ring = vsi->tx_rings[ch->base_q + i]; 8404 rx_ring = vsi->rx_rings[ch->base_q + i]; 8405 if (tx_ring) { 8406 tx_ring->ch = NULL; 8407 if (tx_ring->q_vector) 8408 tx_ring->q_vector->ch = NULL; 8409 } 8410 if (rx_ring) { 8411 rx_ring->ch = NULL; 8412 if (rx_ring->q_vector) 8413 rx_ring->q_vector->ch = NULL; 8414 } 8415 } 8416 8417 /* Release FD resources for the channel VSI */ 8418 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); 8419 8420 /* clear the VSI from scheduler tree */ 8421 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); 8422 8423 /* Delete VSI from FW */ 8424 ice_vsi_delete(ch->ch_vsi); 8425 8426 /* Delete VSI from PF and HW VSI arrays */ 8427 ice_vsi_clear(ch->ch_vsi); 8428 8429 /* free the channel */ 8430 kfree(ch); 8431 } 8432 8433 /* clear the channel VSI map which is stored in main VSI */ 8434 ice_for_each_chnl_tc(i) 8435 vsi->tc_map_vsi[i] = NULL; 8436 8437 /* reset main VSI's all TC information */ 8438 vsi->all_enatc = 0; 8439 vsi->all_numtc = 0; 8440 } 8441 8442 /** 8443 * ice_rebuild_channels - rebuild channel 8444 * @pf: ptr to PF 8445 * 8446 * Recreate channel VSIs and replay filters 8447 */ 8448 static int ice_rebuild_channels(struct ice_pf *pf) 8449 { 8450 struct device *dev = ice_pf_to_dev(pf); 8451 struct ice_vsi *main_vsi; 8452 bool rem_adv_fltr = true; 8453 struct ice_channel *ch; 8454 struct ice_vsi *vsi; 8455 int tc_idx = 1; 8456 int i, err; 8457 8458 main_vsi = ice_get_main_vsi(pf); 8459 if (!main_vsi) 8460 return 0; 8461 8462 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || 8463 main_vsi->old_numtc == 1) 8464 return 0; /* nothing to be done */ 8465 8466 /* reconfigure main VSI based on old value of TC and cached values 8467 * for MQPRIO opts 8468 */ 8469 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc); 8470 if (err) { 8471 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n", 8472 main_vsi->old_ena_tc, main_vsi->vsi_num); 8473 return err; 8474 } 8475 8476 /* rebuild ADQ VSIs */ 8477 ice_for_each_vsi(pf, i) { 8478 enum ice_vsi_type type; 8479 8480 vsi = pf->vsi[i]; 8481 if (!vsi || vsi->type != ICE_VSI_CHNL) 8482 continue; 8483 8484 type = vsi->type; 8485 8486 /* rebuild ADQ VSI */ 8487 err = ice_vsi_rebuild(vsi, true); 8488 if (err) { 8489 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n", 8490 ice_vsi_type_str(type), vsi->idx, err); 8491 goto cleanup; 8492 } 8493 8494 /* Re-map HW VSI number, using VSI handle that has been 8495 * previously validated in ice_replay_vsi() call above 8496 */ 8497 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 8498 8499 /* replay filters for the VSI */ 8500 err = ice_replay_vsi(&pf->hw, vsi->idx); 8501 if (err) { 8502 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n", 8503 ice_vsi_type_str(type), err, vsi->idx); 8504 rem_adv_fltr = false; 8505 goto cleanup; 8506 } 8507 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n", 8508 ice_vsi_type_str(type), vsi->idx); 8509 8510 /* store ADQ VSI at correct TC index in main VSI's 8511 * map of TC to VSI 8512 */ 
		main_vsi->tc_map_vsi[tc_idx++] = vsi;
	}

	/* ADQ VSI(s) have been rebuilt successfully, so set up the channel
	 * for main VSI's Tx and Rx rings
	 */
	list_for_each_entry(ch, &main_vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		ch_vsi = ch->ch_vsi;
		if (!ch_vsi)
			continue;

		/* reconfig channel resources */
		ice_cfg_chnl_all_res(main_vsi, ch);

		/* replay BW rate limit if it is non-zero */
		if (!ch->max_tx_rate && !ch->min_tx_rate)
			continue;

		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (err)
			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				err, ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
		else
			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
	}

	/* reconfig RSS for main VSI */
	if (main_vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(main_vsi);

	return 0;

cleanup:
	ice_remove_q_channels(main_vsi, rem_adv_fltr);
	return err;
}

/**
 * ice_create_q_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 */
static int ice_create_q_channels(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_channel *ch;
	int ret = 0, i;

	ice_for_each_chnl_tc(i) {
		if (!(vsi->all_enatc & BIT(i)))
			continue;

		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			ret = -ENOMEM;
			goto err_free;
		}
		INIT_LIST_HEAD(&ch->list);
		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];

		/* convert to Kbits/s */
		if (ch->max_tx_rate)
			ch->max_tx_rate = div_u64(ch->max_tx_rate,
						  ICE_BW_KBPS_DIVISOR);
		if (ch->min_tx_rate)
			ch->min_tx_rate = div_u64(ch->min_tx_rate,
						  ICE_BW_KBPS_DIVISOR);

		ret = ice_create_q_channel(vsi, ch);
		if (ret) {
			dev_err(ice_pf_to_dev(pf),
				"failed creating channel TC:%d\n", i);
			kfree(ch);
			goto err_free;
		}
		list_add_tail(&ch->list, &vsi->ch_list);
		vsi->tc_map_vsi[i] = ch->ch_vsi;
		dev_dbg(ice_pf_to_dev(pf),
			"successfully created channel: VSI %pK\n", ch->ch_vsi);
	}
	return 0;

err_free:
	ice_remove_q_channels(vsi, false);

	return ret;
}

/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 */
static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 mode, ena_tc_qdisc = 0;
	int cur_txq, cur_rxq;
	u8 hw = 0, num_tcf;
	struct device *dev;
	int ret, i;

	dev = ice_pf_to_dev(pf);
	num_tcf = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
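		/* hw == 0 means the qdisc is being deleted: clear the cached
		 * ADQ state on the VSI and take the config_tcf path below to
		 * tear the channels down
		 */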
		vsi->ch_rss_size = 0;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tcf;
	}

	/* Generate queue region map for number of TCF requested */
	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= BIT(i);

	switch (mode) {
	case TC_MQPRIO_MODE_CHANNEL:

		if (pf->hw.port_info->is_custom_tx_enabled) {
			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
			return -EBUSY;
		}
		ice_tear_down_devlink_rate_tree(pf);

		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret) {
			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
				   ret);
			return ret;
		}
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload
		 * already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
		break;
	default:
		return -EINVAL;
	}

config_tcf:

	/* Requesting same TCF configuration as already enabled */
	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Pause VSI queues */
	ice_dis_vsi(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ice_remove_q_channels(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
				     num_online_cpus());
		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
				     num_online_cpus());
	} else {
		/* logic to rebuild VSI, same like ethtool -L */
		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;

		for (i = 0; i < num_tcf; i++) {
			if (!(ena_tc_qdisc & BIT(i)))
				continue;

			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
		vsi->req_txq = offset + qcount_tx;
		vsi->req_rxq = offset + qcount_rx;

		/* store away original rss_size info, so that it gets reused
		 * from ice_vsi_rebuild during tc-qdisc delete stage - to
		 * determine what should be the rss_size for main VSI
		 */
		vsi->orig_rss_size = vsi->rss_size;
	}

	/* save current values of Tx and Rx queues before calling VSI rebuild
	 * for fallback option
	 */
	cur_txq = vsi->num_txq;
	cur_rxq = vsi->num_rxq;

	/* proceed with rebuild main VSI using correct number of queues */
	ret = ice_vsi_rebuild(vsi, false);
	if (ret) {
		/* fallback to current number of queues */
		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
		vsi->req_txq = cur_txq;
		vsi->req_rxq = cur_rxq;
		clear_bit(ICE_RESET_FAILED, pf->state);
		if (ice_vsi_rebuild(vsi, false)) {
			dev_err(dev, "Rebuild of main VSI failed again\n");
			return ret;
		}
	}

	vsi->all_numtc = num_tcf;
	vsi->all_enatc = ena_tc_qdisc;
	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
	if (ret) {
		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
			   vsi->vsi_num);
		goto exit;
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

		/* set TC0 rate limit if specified */
		if (max_tx_rate ||
min_tx_rate) { 8747 /* convert to Kbits/s */ 8748 if (max_tx_rate) 8749 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR); 8750 if (min_tx_rate) 8751 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR); 8752 8753 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate); 8754 if (!ret) { 8755 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n", 8756 max_tx_rate, min_tx_rate, vsi->vsi_num); 8757 } else { 8758 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n", 8759 max_tx_rate, min_tx_rate, vsi->vsi_num); 8760 goto exit; 8761 } 8762 } 8763 ret = ice_create_q_channels(vsi); 8764 if (ret) { 8765 netdev_err(netdev, "failed configuring queue channels\n"); 8766 goto exit; 8767 } else { 8768 netdev_dbg(netdev, "successfully configured channels\n"); 8769 } 8770 } 8771 8772 if (vsi->ch_rss_size) 8773 ice_vsi_cfg_rss_lut_key(vsi); 8774 8775 exit: 8776 /* if error, reset the all_numtc and all_enatc */ 8777 if (ret) { 8778 vsi->all_numtc = 0; 8779 vsi->all_enatc = 0; 8780 } 8781 /* resume VSI */ 8782 ice_ena_vsi(vsi, true); 8783 8784 return ret; 8785 } 8786 8787 static LIST_HEAD(ice_block_cb_list); 8788 8789 static int 8790 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, 8791 void *type_data) 8792 { 8793 struct ice_netdev_priv *np = netdev_priv(netdev); 8794 struct ice_pf *pf = np->vsi->back; 8795 int err; 8796 8797 switch (type) { 8798 case TC_SETUP_BLOCK: 8799 return flow_block_cb_setup_simple(type_data, 8800 &ice_block_cb_list, 8801 ice_setup_tc_block_cb, 8802 np, np, true); 8803 case TC_SETUP_QDISC_MQPRIO: 8804 /* setup traffic classifier for receive side */ 8805 mutex_lock(&pf->tc_mutex); 8806 err = ice_setup_tc_mqprio_qdisc(netdev, type_data); 8807 mutex_unlock(&pf->tc_mutex); 8808 return err; 8809 default: 8810 return -EOPNOTSUPP; 8811 } 8812 return -EOPNOTSUPP; 8813 } 8814 8815 static struct ice_indr_block_priv * 8816 ice_indr_block_priv_lookup(struct ice_netdev_priv *np, 8817 struct net_device *netdev) 8818 { 8819 struct ice_indr_block_priv *cb_priv; 8820 8821 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { 8822 if (!cb_priv->netdev) 8823 return NULL; 8824 if (cb_priv->netdev == netdev) 8825 return cb_priv; 8826 } 8827 return NULL; 8828 } 8829 8830 static int 8831 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, 8832 void *indr_priv) 8833 { 8834 struct ice_indr_block_priv *priv = indr_priv; 8835 struct ice_netdev_priv *np = priv->np; 8836 8837 switch (type) { 8838 case TC_SETUP_CLSFLOWER: 8839 return ice_setup_tc_cls_flower(np, priv->netdev, 8840 (struct flow_cls_offload *) 8841 type_data); 8842 default: 8843 return -EOPNOTSUPP; 8844 } 8845 } 8846 8847 static int 8848 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch, 8849 struct ice_netdev_priv *np, 8850 struct flow_block_offload *f, void *data, 8851 void (*cleanup)(struct flow_block_cb *block_cb)) 8852 { 8853 struct ice_indr_block_priv *indr_priv; 8854 struct flow_block_cb *block_cb; 8855 8856 if (!ice_is_tunnel_supported(netdev) && 8857 !(is_vlan_dev(netdev) && 8858 vlan_dev_real_dev(netdev) == np->vsi->netdev)) 8859 return -EOPNOTSUPP; 8860 8861 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 8862 return -EOPNOTSUPP; 8863 8864 switch (f->command) { 8865 case FLOW_BLOCK_BIND: 8866 indr_priv = ice_indr_block_priv_lookup(np, netdev); 8867 if (indr_priv) 8868 return -EEXIST; 8869 8870 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL); 8871 if (!indr_priv) 8872 return -ENOMEM; 8873 8874 indr_priv->netdev = netdev; 8875 
indr_priv->np = np; 8876 list_add(&indr_priv->list, &np->tc_indr_block_priv_list); 8877 8878 block_cb = 8879 flow_indr_block_cb_alloc(ice_indr_setup_block_cb, 8880 indr_priv, indr_priv, 8881 ice_rep_indr_tc_block_unbind, 8882 f, netdev, sch, data, np, 8883 cleanup); 8884 8885 if (IS_ERR(block_cb)) { 8886 list_del(&indr_priv->list); 8887 kfree(indr_priv); 8888 return PTR_ERR(block_cb); 8889 } 8890 flow_block_cb_add(block_cb, f); 8891 list_add_tail(&block_cb->driver_list, &ice_block_cb_list); 8892 break; 8893 case FLOW_BLOCK_UNBIND: 8894 indr_priv = ice_indr_block_priv_lookup(np, netdev); 8895 if (!indr_priv) 8896 return -ENOENT; 8897 8898 block_cb = flow_block_cb_lookup(f->block, 8899 ice_indr_setup_block_cb, 8900 indr_priv); 8901 if (!block_cb) 8902 return -ENOENT; 8903 8904 flow_indr_block_cb_remove(block_cb, f); 8905 8906 list_del(&block_cb->driver_list); 8907 break; 8908 default: 8909 return -EOPNOTSUPP; 8910 } 8911 return 0; 8912 } 8913 8914 static int 8915 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, 8916 void *cb_priv, enum tc_setup_type type, void *type_data, 8917 void *data, 8918 void (*cleanup)(struct flow_block_cb *block_cb)) 8919 { 8920 switch (type) { 8921 case TC_SETUP_BLOCK: 8922 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data, 8923 data, cleanup); 8924 8925 default: 8926 return -EOPNOTSUPP; 8927 } 8928 } 8929 8930 /** 8931 * ice_open - Called when a network interface becomes active 8932 * @netdev: network interface device structure 8933 * 8934 * The open entry point is called when a network interface is made 8935 * active by the system (IFF_UP). At this point all resources needed 8936 * for transmit and receive operations are allocated, the interrupt 8937 * handler is registered with the OS, the netdev watchdog is enabled, 8938 * and the stack is notified that the interface is ready. 8939 * 8940 * Returns 0 on success, negative value on failure 8941 */ 8942 int ice_open(struct net_device *netdev) 8943 { 8944 struct ice_netdev_priv *np = netdev_priv(netdev); 8945 struct ice_pf *pf = np->vsi->back; 8946 8947 if (ice_is_reset_in_progress(pf->state)) { 8948 netdev_err(netdev, "can't open net device while reset is in progress"); 8949 return -EBUSY; 8950 } 8951 8952 return ice_open_internal(netdev); 8953 } 8954 8955 /** 8956 * ice_open_internal - Called when a network interface becomes active 8957 * @netdev: network interface device structure 8958 * 8959 * Internal ice_open implementation. 
Should not be used directly except for ice_open and reset 8960 * handling routine 8961 * 8962 * Returns 0 on success, negative value on failure 8963 */ 8964 int ice_open_internal(struct net_device *netdev) 8965 { 8966 struct ice_netdev_priv *np = netdev_priv(netdev); 8967 struct ice_vsi *vsi = np->vsi; 8968 struct ice_pf *pf = vsi->back; 8969 struct ice_port_info *pi; 8970 int err; 8971 8972 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { 8973 netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 8974 return -EIO; 8975 } 8976 8977 netif_carrier_off(netdev); 8978 8979 pi = vsi->port_info; 8980 err = ice_update_link_info(pi); 8981 if (err) { 8982 netdev_err(netdev, "Failed to get link info, error %d\n", err); 8983 return err; 8984 } 8985 8986 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); 8987 8988 /* Set PHY if there is media, otherwise, turn off PHY */ 8989 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 8990 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 8991 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { 8992 err = ice_init_phy_user_cfg(pi); 8993 if (err) { 8994 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", 8995 err); 8996 return err; 8997 } 8998 } 8999 9000 err = ice_configure_phy(vsi); 9001 if (err) { 9002 netdev_err(netdev, "Failed to set physical link up, error %d\n", 9003 err); 9004 return err; 9005 } 9006 } else { 9007 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 9008 ice_set_link(vsi, false); 9009 } 9010 9011 err = ice_vsi_open(vsi); 9012 if (err) 9013 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 9014 vsi->vsi_num, vsi->vsw->sw_id); 9015 9016 /* Update existing tunnels information */ 9017 udp_tunnel_get_rx_info(netdev); 9018 9019 return err; 9020 } 9021 9022 /** 9023 * ice_stop - Disables a network interface 9024 * @netdev: network interface device structure 9025 * 9026 * The stop entry point is called when an interface is de-activated by the OS, 9027 * and the netdevice enters the DOWN state. The hardware is still under the 9028 * driver's control, but the netdev interface is disabled. 9029 * 9030 * Returns success only - not allowed to fail 9031 */ 9032 int ice_stop(struct net_device *netdev) 9033 { 9034 struct ice_netdev_priv *np = netdev_priv(netdev); 9035 struct ice_vsi *vsi = np->vsi; 9036 struct ice_pf *pf = vsi->back; 9037 9038 if (ice_is_reset_in_progress(pf->state)) { 9039 netdev_err(netdev, "can't stop net device while reset is in progress"); 9040 return -EBUSY; 9041 } 9042 9043 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { 9044 int link_err = ice_force_phys_link_state(vsi, false); 9045 9046 if (link_err) { 9047 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", 9048 vsi->vsi_num, link_err); 9049 return -EIO; 9050 } 9051 } 9052 9053 ice_vsi_close(vsi); 9054 9055 return 0; 9056 } 9057 9058 /** 9059 * ice_features_check - Validate encapsulated packet conforms to limits 9060 * @skb: skb buffer 9061 * @netdev: This port's netdev 9062 * @features: Offload features that the stack believes apply 9063 */ 9064 static netdev_features_t 9065 ice_features_check(struct sk_buff *skb, 9066 struct net_device __always_unused *netdev, 9067 netdev_features_t features) 9068 { 9069 bool gso = skb_is_gso(skb); 9070 size_t len; 9071 9072 /* No point in doing any of this if neither checksum nor GSO are 9073 * being requested for this frame. 
We can rule out both by just 9074 * checking for CHECKSUM_PARTIAL 9075 */ 9076 if (skb->ip_summed != CHECKSUM_PARTIAL) 9077 return features; 9078 9079 /* We cannot support GSO if the MSS is going to be less than 9080 * 64 bytes. If it is then we need to drop support for GSO. 9081 */ 9082 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) 9083 features &= ~NETIF_F_GSO_MASK; 9084 9085 len = skb_network_offset(skb); 9086 if (len > ICE_TXD_MACLEN_MAX || len & 0x1) 9087 goto out_rm_features; 9088 9089 len = skb_network_header_len(skb); 9090 if (len > ICE_TXD_IPLEN_MAX || len & 0x1) 9091 goto out_rm_features; 9092 9093 if (skb->encapsulation) { 9094 /* this must work for VXLAN frames AND IPIP/SIT frames, and in 9095 * the case of IPIP frames, the transport header pointer is 9096 * after the inner header! So check to make sure that this 9097 * is a GRE or UDP_TUNNEL frame before doing that math. 9098 */ 9099 if (gso && (skb_shinfo(skb)->gso_type & 9100 (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { 9101 len = skb_inner_network_header(skb) - 9102 skb_transport_header(skb); 9103 if (len > ICE_TXD_L4LEN_MAX || len & 0x1) 9104 goto out_rm_features; 9105 } 9106 9107 len = skb_inner_network_header_len(skb); 9108 if (len > ICE_TXD_IPLEN_MAX || len & 0x1) 9109 goto out_rm_features; 9110 } 9111 9112 return features; 9113 out_rm_features: 9114 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 9115 } 9116 9117 static const struct net_device_ops ice_netdev_safe_mode_ops = { 9118 .ndo_open = ice_open, 9119 .ndo_stop = ice_stop, 9120 .ndo_start_xmit = ice_start_xmit, 9121 .ndo_set_mac_address = ice_set_mac_address, 9122 .ndo_validate_addr = eth_validate_addr, 9123 .ndo_change_mtu = ice_change_mtu, 9124 .ndo_get_stats64 = ice_get_stats64, 9125 .ndo_tx_timeout = ice_tx_timeout, 9126 .ndo_bpf = ice_xdp_safe_mode, 9127 }; 9128 9129 static const struct net_device_ops ice_netdev_ops = { 9130 .ndo_open = ice_open, 9131 .ndo_stop = ice_stop, 9132 .ndo_start_xmit = ice_start_xmit, 9133 .ndo_select_queue = ice_select_queue, 9134 .ndo_features_check = ice_features_check, 9135 .ndo_fix_features = ice_fix_features, 9136 .ndo_set_rx_mode = ice_set_rx_mode, 9137 .ndo_set_mac_address = ice_set_mac_address, 9138 .ndo_validate_addr = eth_validate_addr, 9139 .ndo_change_mtu = ice_change_mtu, 9140 .ndo_get_stats64 = ice_get_stats64, 9141 .ndo_set_tx_maxrate = ice_set_tx_maxrate, 9142 .ndo_eth_ioctl = ice_eth_ioctl, 9143 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 9144 .ndo_set_vf_mac = ice_set_vf_mac, 9145 .ndo_get_vf_config = ice_get_vf_cfg, 9146 .ndo_set_vf_trust = ice_set_vf_trust, 9147 .ndo_set_vf_vlan = ice_set_vf_port_vlan, 9148 .ndo_set_vf_link_state = ice_set_vf_link_state, 9149 .ndo_get_vf_stats = ice_get_vf_stats, 9150 .ndo_set_vf_rate = ice_set_vf_bw, 9151 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 9152 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 9153 .ndo_setup_tc = ice_setup_tc, 9154 .ndo_set_features = ice_set_features, 9155 .ndo_bridge_getlink = ice_bridge_getlink, 9156 .ndo_bridge_setlink = ice_bridge_setlink, 9157 .ndo_fdb_add = ice_fdb_add, 9158 .ndo_fdb_del = ice_fdb_del, 9159 #ifdef CONFIG_RFS_ACCEL 9160 .ndo_rx_flow_steer = ice_rx_flow_steer, 9161 #endif 9162 .ndo_tx_timeout = ice_tx_timeout, 9163 .ndo_bpf = ice_xdp, 9164 .ndo_xdp_xmit = ice_xdp_xmit, 9165 .ndo_xsk_wakeup = ice_xsk_wakeup, 9166 }; 9167