// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static DEFINE_IDA(ice_aux_ida);

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static int ice_vsi_open(struct ice_vsi *vsi);

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

bool netif_is_ice(struct net_device *dev)
{
        return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
        u16 head, tail;

        head = ring->next_to_clean;
        tail = ring->next_to_use;

        if (head != tail)
                return (head < tail) ?
                        tail - head : (tail + ring->count - head);
        return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
        struct ice_vsi *vsi = NULL;
        struct ice_hw *hw;
        unsigned int i;
        int packets;
        u32 v;

        ice_for_each_vsi(pf, v)
                if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
                        vsi = pf->vsi[v];
                        break;
                }

        if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
                return;

        if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
                return;

        hw = &vsi->back->hw;

        for (i = 0; i < vsi->num_txq; i++) {
                struct ice_ring *tx_ring = vsi->tx_rings[i];

                if (tx_ring && tx_ring->desc) {
                        /* If packet counter has not changed the queue is
                         * likely stalled, so force an interrupt for this
                         * queue.
                         *
                         * prev_pkt would be negative if there was no
                         * pending work.
                         */
                        packets = tx_ring->stats.pkts & INT_MAX;
                        if (tx_ring->tx_stats.prev_pkt == packets) {
                                /* Trigger sw interrupt to revive the queue */
                                ice_trigger_sw_intr(hw, tx_ring->q_vector);
                                continue;
                        }

                        /* Memory barrier between read of packet count and call
                         * to ice_get_tx_pending()
                         */
                        smp_rmb();
                        tx_ring->tx_stats.prev_pkt =
                                ice_get_tx_pending(tx_ring) ? packets : -1;
                }
        }
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
        enum ice_status status;
        struct ice_vsi *vsi;
        u8 *perm_addr;

        vsi = ice_get_main_vsi(pf);
        if (!vsi)
                return -EINVAL;

        perm_addr = vsi->port_info->mac.perm_addr;
        status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
        if (status)
                return -EIO;

        return 0;
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;

        if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
                                     ICE_FWD_TO_VSI))
                return -EINVAL;

        return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;

        if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
                                     ICE_FWD_TO_VSI))
                return -EINVAL;

        return 0;
}
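
/* Note: the two sync-list helpers above never touch hardware themselves.
 * ice_vsi_sync_fltr() hands them to __dev_uc_sync()/__dev_mc_sync(), which
 * diff the netdev address lists and invoke one callback per changed address;
 * the staged tmp_sync_list/tmp_unsync_list entries are only pushed through
 * the AdminQ later in ice_vsi_sync_fltr().
 */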

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
        return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
               test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
               test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}

/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
        struct ice_hw *hw = &vsi->back->hw;
        enum ice_status status = 0;

        if (vsi->type != ICE_VSI_PF)
                return 0;

        if (vsi->num_vlan > 1) {
                status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
                                                  set_promisc);
        } else {
                if (set_promisc)
                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
                                                     0);
                else
                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
                                                       0);
        }

        if (status)
                return -EIO;

        return 0;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
        struct device *dev = ice_pf_to_dev(vsi->back);
        struct net_device *netdev = vsi->netdev;
        bool promisc_forced_on = false;
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        enum ice_status status = 0;
        u32 changed_flags = 0;
        u8 promisc_m;
        int err = 0;

        if (!vsi->netdev)
                return -EINVAL;

        while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
                usleep_range(1000, 2000);

        changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
        vsi->current_netdev_flags = vsi->netdev->flags;

        INIT_LIST_HEAD(&vsi->tmp_sync_list);
        INIT_LIST_HEAD(&vsi->tmp_unsync_list);

        if (ice_vsi_fltr_changed(vsi)) {
                clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
                clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
                clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

                /* grab the netdev's addr_list_lock */
                netif_addr_lock_bh(netdev);
                __dev_uc_sync(netdev, ice_add_mac_to_sync_list,
                              ice_add_mac_to_unsync_list);
                __dev_mc_sync(netdev, ice_add_mac_to_sync_list,
                              ice_add_mac_to_unsync_list);
                /* our temp lists are populated. release lock */
                netif_addr_unlock_bh(netdev);
        }

        /* Remove MAC addresses in the unsync list */
        status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
        ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
        if (status) {
                netdev_err(netdev, "Failed to delete MAC filters\n");
                /* if we failed because of alloc failures, just bail */
                if (status == ICE_ERR_NO_MEMORY) {
                        err = -ENOMEM;
                        goto out;
                }
        }

        /* Add MAC addresses in the sync list */
        status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
        ice_fltr_free_list(dev, &vsi->tmp_sync_list);
        /* If a filter was added successfully or already exists, do not treat
         * it as an error. Instead continue processing the rest of the
         * function.
         */
        if (status && status != ICE_ERR_ALREADY_EXISTS) {
                netdev_err(netdev, "Failed to add MAC filters\n");
                /* If there is no more space for new umac filters, VSI
                 * should go into promiscuous mode. There should be some
                 * space reserved for promiscuous filters.
                 */
                if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
                    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
                                      vsi->state)) {
                        promisc_forced_on = true;
                        netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
                                    vsi->vsi_num);
                } else {
                        err = -EIO;
                        goto out;
                }
        }
        /* check for changes in promiscuous modes */
        if (changed_flags & IFF_ALLMULTI) {
                if (vsi->current_netdev_flags & IFF_ALLMULTI) {
                        if (vsi->num_vlan > 1)
                                promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
                        else
                                promisc_m = ICE_MCAST_PROMISC_BITS;

                        err = ice_cfg_promisc(vsi, promisc_m, true);
                        if (err) {
                                netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
                                           vsi->vsi_num);
                                vsi->current_netdev_flags &= ~IFF_ALLMULTI;
                                goto out_promisc;
                        }
                } else {
                        /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
                        if (vsi->num_vlan > 1)
                                promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
                        else
                                promisc_m = ICE_MCAST_PROMISC_BITS;

                        err = ice_cfg_promisc(vsi, promisc_m, false);
                        if (err) {
                                netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
                                           vsi->vsi_num);
                                vsi->current_netdev_flags |= IFF_ALLMULTI;
                                goto out_promisc;
                        }
                }
        }

        if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
            test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
                clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
                if (vsi->current_netdev_flags & IFF_PROMISC) {
                        /* Apply Rx filter rule to get traffic from wire */
                        if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
                                err = ice_set_dflt_vsi(pf->first_sw, vsi);
                                if (err && err != -EEXIST) {
                                        netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
                                                   err, vsi->vsi_num);
                                        vsi->current_netdev_flags &=
                                                ~IFF_PROMISC;
                                        goto out_promisc;
                                }
                                ice_cfg_vlan_pruning(vsi, false, false);
                        }
                } else {
                        /* Clear Rx filter to remove traffic from wire */
                        if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
                                err = ice_clear_dflt_vsi(pf->first_sw);
                                if (err) {
                                        netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
                                                   err, vsi->vsi_num);
                                        vsi->current_netdev_flags |=
                                                IFF_PROMISC;
                                        goto out_promisc;
                                }
                                if (vsi->num_vlan > 1)
                                        ice_cfg_vlan_pruning(vsi, true, false);
                        }
                }
        }
        goto exit;

out_promisc:
        set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
        goto exit;
out:
        /* if something went wrong then set the changed flag so we try again */
        set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
        set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
        clear_bit(ICE_CFG_BUSY, vsi->state);
        return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
        int v;

        if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
                return;

        clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

        ice_for_each_vsi(pf, v)
                if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
                    ice_vsi_sync_fltr(pf->vsi[v])) {
                        /* come back and try again later */
                        set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
                        break;
                }
}
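
/* ice_sync_fltr_subtask() stops at the first VSI whose sync fails and
 * re-sets ICE_FLAG_FLTR_SYNC, so the next service task pass retries the
 * whole sync from the beginning.
 */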

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
        int node;
        int v;

        ice_for_each_vsi(pf, v)
                if (pf->vsi[v])
                        ice_dis_vsi(pf->vsi[v], locked);

        for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
                pf->pf_agg_node[node].num_vsis = 0;

        for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
                pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;
        unsigned int i;

        /* already prepared for reset */
        if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
                return;

        ice_unplug_aux_dev(pf);

        /* Notify VFs of impending reset */
        if (ice_check_sq_alive(hw, &hw->mailboxq))
                ice_vc_notify_reset(pf);

        /* Disable VFs until reset is completed */
        ice_for_each_vf(pf, i)
                ice_set_vf_state_qs_dis(&pf->vf[i]);

        /* clear SW filtering DB */
        ice_clear_hw_tbls(hw);
        /* disable the VSIs and their queues that are not already DOWN */
        ice_pf_dis_all_vsi(pf, false);

        if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
                ice_ptp_release(pf);

        if (hw->port_info)
                ice_sched_clear_port(hw->port_info);

        ice_shutdown_all_ctrlq(hw);

        set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: the reset type requested before this function was called
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;

        dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

        ice_prepare_for_reset(pf);

        /* trigger the reset */
        if (ice_reset(hw, reset_type)) {
                dev_err(dev, "reset %d failed\n", reset_type);
                set_bit(ICE_RESET_FAILED, pf->state);
                clear_bit(ICE_RESET_OICR_RECV, pf->state);
                clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
                clear_bit(ICE_PFR_REQ, pf->state);
                clear_bit(ICE_CORER_REQ, pf->state);
                clear_bit(ICE_GLOBR_REQ, pf->state);
                wake_up(&pf->reset_wait_queue);
                return;
        }

        /* PFR is a bit of a special case because it doesn't result in an OICR
         * interrupt. So for PFR, rebuild after the reset and clear the reset-
         * associated state bits.
         */
        if (reset_type == ICE_RESET_PFR) {
                pf->pfr_count++;
                ice_rebuild(pf, reset_type);
                clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
                clear_bit(ICE_PFR_REQ, pf->state);
                wake_up(&pf->reset_wait_queue);
                ice_reset_all_vfs(pf, true);
        }
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
        enum ice_reset_req reset_type = ICE_RESET_INVAL;

        /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
         * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
         * of reset is pending and sets bits in pf->state indicating the reset
         * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
         * prepare for pending reset if not already (for PF software-initiated
         * global resets the software should already be prepared for it as
         * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
         * by firmware or software on other PFs, that bit is not set so prepare
         * for the reset now), poll for reset done, rebuild and return.
         */
        if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
                /* Perform the largest reset requested */
                if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
                        reset_type = ICE_RESET_CORER;
                if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
                        reset_type = ICE_RESET_GLOBR;
                if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
                        reset_type = ICE_RESET_EMPR;
                /* return if no valid reset type requested */
                if (reset_type == ICE_RESET_INVAL)
                        return;
                ice_prepare_for_reset(pf);

                /* make sure we are ready to rebuild */
                if (ice_check_reset(&pf->hw)) {
                        set_bit(ICE_RESET_FAILED, pf->state);
                } else {
                        /* done with reset. start rebuild */
                        pf->hw.reset_ongoing = false;
                        ice_rebuild(pf, reset_type);
                        /* clear bit to resume normal operations, but
                         * ICE_NEEDS_RESTART bit is set in case rebuild failed
                         */
                        clear_bit(ICE_RESET_OICR_RECV, pf->state);
                        clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
                        clear_bit(ICE_PFR_REQ, pf->state);
                        clear_bit(ICE_CORER_REQ, pf->state);
                        clear_bit(ICE_GLOBR_REQ, pf->state);
                        wake_up(&pf->reset_wait_queue);
                        ice_reset_all_vfs(pf, true);
                }

                return;
        }

        /* No pending resets to finish processing. Check for new resets */
        if (test_bit(ICE_PFR_REQ, pf->state))
                reset_type = ICE_RESET_PFR;
        if (test_bit(ICE_CORER_REQ, pf->state))
                reset_type = ICE_RESET_CORER;
        if (test_bit(ICE_GLOBR_REQ, pf->state))
                reset_type = ICE_RESET_GLOBR;
        /* If no valid reset type requested just return */
        if (reset_type == ICE_RESET_INVAL)
                return;

        /* reset if not already down or busy */
        if (!test_bit(ICE_DOWN, pf->state) &&
            !test_bit(ICE_CFG_BUSY, pf->state)) {
                ice_do_reset(pf, reset_type);
        }
}
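
/* Reset flow summary: CORER/GLOBR/EMPR are signaled by an OICR interrupt and
 * finished above in ice_reset_subtask(), while a PFR is driven synchronously
 * by ice_do_reset(). Both paths end in ice_rebuild() and a wake_up() on
 * pf->reset_wait_queue for any thread blocked on reset completion.
 */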

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
        switch (vsi->port_info->phy.link_info.topo_media_conflict) {
        case ICE_AQ_LINK_TOPO_CONFLICT:
        case ICE_AQ_LINK_MEDIA_CONFLICT:
        case ICE_AQ_LINK_TOPO_UNREACH_PRT:
        case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
        case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
                netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
                break;
        case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
                netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
                break;
        default:
                break;
        }
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
        struct ice_aqc_get_phy_caps_data *caps;
        const char *an_advertised;
        enum ice_status status;
        const char *fec_req;
        const char *speed;
        const char *fec;
        const char *fc;
        const char *an;

        if (!vsi)
                return;

        if (vsi->current_isup == isup)
                return;

        vsi->current_isup = isup;

        if (!isup) {
                netdev_info(vsi->netdev, "NIC Link is Down\n");
                return;
        }

        switch (vsi->port_info->phy.link_info.link_speed) {
        case ICE_AQ_LINK_SPEED_100GB:
                speed = "100 G";
                break;
        case ICE_AQ_LINK_SPEED_50GB:
                speed = "50 G";
                break;
        case ICE_AQ_LINK_SPEED_40GB:
                speed = "40 G";
                break;
        case ICE_AQ_LINK_SPEED_25GB:
                speed = "25 G";
                break;
        case ICE_AQ_LINK_SPEED_20GB:
                speed = "20 G";
                break;
        case ICE_AQ_LINK_SPEED_10GB:
                speed = "10 G";
                break;
        case ICE_AQ_LINK_SPEED_5GB:
                speed = "5 G";
                break;
        case ICE_AQ_LINK_SPEED_2500MB:
                speed = "2.5 G";
                break;
        case ICE_AQ_LINK_SPEED_1000MB:
                speed = "1 G";
                break;
        case ICE_AQ_LINK_SPEED_100MB:
                speed = "100 M";
                break;
        default:
                speed = "Unknown ";
                break;
        }

        switch (vsi->port_info->fc.current_mode) {
        case ICE_FC_FULL:
                fc = "Rx/Tx";
                break;
        case ICE_FC_TX_PAUSE:
                fc = "Tx";
                break;
        case ICE_FC_RX_PAUSE:
                fc = "Rx";
                break;
        case ICE_FC_NONE:
                fc = "None";
                break;
        default:
                fc = "Unknown";
                break;
        }

        /* Get FEC mode based on negotiated link info */
        switch (vsi->port_info->phy.link_info.fec_info) {
        case ICE_AQ_LINK_25G_RS_528_FEC_EN:
        case ICE_AQ_LINK_25G_RS_544_FEC_EN:
                fec = "RS-FEC";
                break;
        case ICE_AQ_LINK_25G_KR_FEC_EN:
                fec = "FC-FEC/BASE-R";
                break;
        default:
                fec = "NONE";
                break;
        }

        /* check if autoneg completed, might be false due to not supported */
        if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
                an = "True";
        else
                an = "False";

        /* Get FEC mode requested based on PHY caps last SW configuration */
        caps = kzalloc(sizeof(*caps), GFP_KERNEL);
        if (!caps) {
                fec_req = "Unknown";
                an_advertised = "Unknown";
                goto done;
        }

        status = ice_aq_get_phy_caps(vsi->port_info, false,
                                     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
        if (status)
                netdev_info(vsi->netdev, "Get phy capability failed.\n");

        an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

        if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
            caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
                fec_req = "RS-FEC";
        else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
                 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
                fec_req = "FC-FEC/BASE-R";
        else
                fec_req = "NONE";

        kfree(caps);

done:
        netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
                    speed, fec_req, fec, an_advertised, an, fc);
        ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
        if (!vsi)
                return;

        if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
                return;

        if (vsi->type == ICE_VSI_PF) {
                if (link_up == netif_carrier_ok(vsi->netdev))
                        return;

                if (link_up) {
                        netif_carrier_on(vsi->netdev);
                        netif_tx_wake_all_queues(vsi->netdev);
                } else {
                        netif_carrier_off(vsi->netdev);
                        netif_tx_stop_all_queues(vsi->netdev);
                }
        }
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW. The LLDPDU
 * built here carries three IEEE 802.1Qaz TLVs (ETS CFG, ETS REC, PFC CFG)
 * that map all UPs to TC 0 with 100% bandwidth and leave PFC disabled.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
        struct device *dev = ice_pf_to_dev(pf);
        u8 mib_type, *buf, *lldpmib = NULL;
        u16 len, typelen, offset = 0;
        struct ice_lldp_org_tlv *tlv;
        struct ice_hw *hw = &pf->hw;
        u32 ouisubtype;

        mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
        lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
        if (!lldpmib) {
                dev_dbg(dev, "%s Failed to allocate MIB memory\n",
                        __func__);
                return;
        }

        /* Add ETS CFG TLV */
        tlv = (struct ice_lldp_org_tlv *)lldpmib;
        typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
                   ICE_IEEE_ETS_TLV_LEN);
        tlv->typelen = htons(typelen);
        ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
                      ICE_IEEE_SUBTYPE_ETS_CFG);
        tlv->ouisubtype = htonl(ouisubtype);

        buf = tlv->tlvinfo;
        buf[0] = 0;

        /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
         * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
         * Octets 13 - 20 are TSA values - leave as zeros
         */
        buf[5] = 0x64;
        len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
        offset += len + 2;
        tlv = (struct ice_lldp_org_tlv *)
                ((char *)tlv + sizeof(tlv->typelen) + len);

        /* Add ETS REC TLV */
        buf = tlv->tlvinfo;
        tlv->typelen = htons(typelen);

        ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
                      ICE_IEEE_SUBTYPE_ETS_REC);
        tlv->ouisubtype = htonl(ouisubtype);

        /* First octet of buf is reserved
         * Octets 1 - 4 map UP to TC - all UPs map to zero
         * Octets 5 - 12 are BW values - set TC 0 to 100%.
         * Octets 13 - 20 are TSA value - leave as zeros
         */
        buf[5] = 0x64;
        offset += len + 2;
        tlv = (struct ice_lldp_org_tlv *)
                ((char *)tlv + sizeof(tlv->typelen) + len);

        /* Add PFC CFG TLV */
        typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
                   ICE_IEEE_PFC_TLV_LEN);
        tlv->typelen = htons(typelen);

        ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
                      ICE_IEEE_SUBTYPE_PFC_CFG);
        tlv->ouisubtype = htonl(ouisubtype);

        /* Octet 1 left as all zeros - PFC disabled */
        buf[0] = 0x08;
        len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
        offset += len + 2;

        if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
                dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

        kfree(lldpmib);
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * Check the module power level returned by a previous call to
 * aq_get_link_info and print error messages if it is not supported.
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
        /* if module power level is supported, clear the flag */
        if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
                              ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
                clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
                return;
        }

        /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
         * above block didn't clear this bit, there's nothing to do
         */
        if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
                return;

        if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
                dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
                set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
        } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
                dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
                set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
        }
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
               u16 link_speed)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_phy_info *phy_info;
        enum ice_status status;
        struct ice_vsi *vsi;
        u16 old_link_speed;
        bool old_link;

        phy_info = &pi->phy;
        phy_info->link_info_old = phy_info->link_info;

        old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
        old_link_speed = phy_info->link_info_old.link_speed;

        /* update the link info structures and re-enable link events,
         * don't bail on failure since other bookkeeping is still needed
         */
        status = ice_update_link_info(pi);
        if (status)
                dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
                        pi->lport, ice_stat_str(status),
                        ice_aq_str(pi->hw->adminq.sq_last_status));

        ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);

        /* Check if the link state is up after updating link info, and treat
         * this event as an UP event since the link is actually UP now.
         */
        if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
                link_up = true;

        vsi = ice_get_main_vsi(pf);
        if (!vsi || !vsi->port_info)
                return -EINVAL;

        /* turn off PHY if media was removed */
        if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
            !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
                set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
                ice_set_link(vsi, false);
        }

        /* if the old link up/down and speed is the same as the new */
        if (link_up == old_link && link_speed == old_link_speed)
                return 0;

        if (ice_is_dcb_active(pf)) {
                if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
                        ice_dcb_rebuild(pf);
        } else {
                if (link_up)
                        ice_set_dflt_mib(pf);
        }
        ice_vsi_link_event(vsi, link_up);
        ice_print_link_msg(vsi, link_up);

        ice_vc_notify_link_state(pf);

        return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
        int i;

        /* if interface is down do nothing */
        if (test_bit(ICE_DOWN, pf->state) ||
            test_bit(ICE_CFG_BUSY, pf->state))
                return;

        /* make sure we don't do these things too often */
        if (time_before(jiffies,
                        pf->serv_tmr_prev + pf->serv_tmr_period))
                return;

        pf->serv_tmr_prev = jiffies;

        /* Update the stats for active netdevs so the network stack
         * can look at updated numbers whenever it cares to
         */
        ice_update_pf_stats(pf);
        ice_for_each_vsi(pf, i)
                if (pf->vsi[i] && pf->vsi[i]->netdev)
                        ice_update_vsi_stats(pf->vsi[i]);
}
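
/* The event mask written by ice_init_link_events() below is inverted: a set
 * bit masks (disables) the corresponding link event, so only link up/down,
 * media-not-available and module qualification failure events stay enabled.
 */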

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
        u16 mask;

        mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
                       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

        if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
                dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
                        pi->lport);
                return -EIO;
        }

        if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
                dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
                        pi->lport);
                return -EIO;
        }

        return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
        struct ice_aqc_get_link_status_data *link_data;
        struct ice_port_info *port_info;
        int status;

        link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
        port_info = pf->hw.port_info;
        if (!port_info)
                return -EINVAL;

        status = ice_link_event(pf, port_info,
                                !!(link_data->link_info & ICE_AQ_LINK_UP),
                                le16_to_cpu(link_data->link_speed));
        if (status)
                dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
                        status);

        return status;
}

enum ice_aq_task_state {
        ICE_AQ_TASK_WAITING = 0,
        ICE_AQ_TASK_COMPLETE,
        ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
        struct hlist_node entry;

        u16 opcode;
        struct ice_rq_event_info *event;
        enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
                          struct ice_rq_event_info *event)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_aq_task *task;
        unsigned long start;
        long ret;
        int err;

        task = kzalloc(sizeof(*task), GFP_KERNEL);
        if (!task)
                return -ENOMEM;

        INIT_HLIST_NODE(&task->entry);
        task->opcode = opcode;
        task->event = event;
        task->state = ICE_AQ_TASK_WAITING;

        spin_lock_bh(&pf->aq_wait_lock);
        hlist_add_head(&task->entry, &pf->aq_wait_list);
        spin_unlock_bh(&pf->aq_wait_lock);

        start = jiffies;

        ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
                                               timeout);
        switch (task->state) {
        case ICE_AQ_TASK_WAITING:
                err = ret < 0 ? ret : -ETIMEDOUT;
                break;
        case ICE_AQ_TASK_CANCELED:
                err = ret < 0 ? ret : -ECANCELED;
                break;
        case ICE_AQ_TASK_COMPLETE:
                err = ret < 0 ? ret : 0;
                break;
        default:
                WARN(1, "Unexpected AdminQ wait task state %u", task->state);
                err = -EINVAL;
                break;
        }

        dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
                jiffies_to_msecs(jiffies - start),
                jiffies_to_msecs(timeout),
                opcode);

        spin_lock_bh(&pf->aq_wait_lock);
        hlist_del(&task->entry);
        spin_unlock_bh(&pf->aq_wait_lock);
        kfree(task);

        return err;
}

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
                                struct ice_rq_event_info *event)
{
        struct ice_aq_task *task;
        bool found = false;

        spin_lock_bh(&pf->aq_wait_lock);
        hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
                if (task->state || task->opcode != opcode)
                        continue;

                memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
                task->event->msg_len = event->msg_len;

                /* Only copy the data buffer if a destination was set */
                if (task->event->msg_buf &&
                    task->event->buf_len > event->buf_len) {
                        memcpy(task->event->msg_buf, event->msg_buf,
                               event->buf_len);
                        task->event->buf_len = event->buf_len;
                }

                task->state = ICE_AQ_TASK_COMPLETE;
                found = true;
        }
        spin_unlock_bh(&pf->aq_wait_lock);

        if (found)
                wake_up(&pf->aq_wait_queue);
}
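
/* Hypothetical caller pattern for the AdminQ wait machinery above (buf and
 * buf_len are illustrative): issue an AdminQ command elsewhere, then block
 * for its asynchronous completion event:
 *
 *	struct ice_rq_event_info event = { };
 *
 *	event.buf_len = buf_len;	// only needed if the response
 *	event.msg_buf = buf;		// payload is wanted
 *	err = ice_aq_wait_for_event(pf, opcode, HZ, &event);
 *
 * __ice_clean_ctrlq() completes the wait by calling ice_aq_check_events()
 * for every descriptor it receives.
 */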

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
        struct ice_aq_task *task;

        spin_lock_bh(&pf->aq_wait_lock);
        hlist_for_each_entry(task, &pf->aq_wait_list, entry)
                task->state = ICE_AQ_TASK_CANCELED;
        spin_unlock_bh(&pf->aq_wait_lock);

        wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_rq_event_info event;
        struct ice_hw *hw = &pf->hw;
        struct ice_ctl_q_info *cq;
        u16 pending, i = 0;
        const char *qtype;
        u32 oldval, val;

        /* Do not clean control queue if/when PF reset fails */
        if (test_bit(ICE_RESET_FAILED, pf->state))
                return 0;

        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                cq = &hw->adminq;
                qtype = "Admin";
                break;
        case ICE_CTL_Q_SB:
                cq = &hw->sbq;
                qtype = "Sideband";
                break;
        case ICE_CTL_Q_MAILBOX:
                cq = &hw->mailboxq;
                qtype = "Mailbox";
                /* we are going to try to detect a malicious VF, so set the
                 * state to begin detection
                 */
                hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
                break;
        default:
                dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
                return 0;
        }

        /* check for error indications - PF_xx_AxQLEN register layout for
         * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
         */
        val = rd32(hw, cq->rq.len);
        if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
                   PF_FW_ARQLEN_ARQCRIT_M)) {
                oldval = val;
                if (val & PF_FW_ARQLEN_ARQVFE_M)
                        dev_dbg(dev, "%s Receive Queue VF Error detected\n",
                                qtype);
                if (val & PF_FW_ARQLEN_ARQOVFL_M) {
                        dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
                                qtype);
                }
                if (val & PF_FW_ARQLEN_ARQCRIT_M)
                        dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
                                qtype);
                val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
                         PF_FW_ARQLEN_ARQCRIT_M);
                if (oldval != val)
                        wr32(hw, cq->rq.len, val);
        }

        val = rd32(hw, cq->sq.len);
        if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
                   PF_FW_ATQLEN_ATQCRIT_M)) {
                oldval = val;
                if (val & PF_FW_ATQLEN_ATQVFE_M)
                        dev_dbg(dev, "%s Send Queue VF Error detected\n",
                                qtype);
                if (val & PF_FW_ATQLEN_ATQOVFL_M) {
                        dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
                                qtype);
                }
                if (val & PF_FW_ATQLEN_ATQCRIT_M)
                        dev_dbg(dev, "%s Send Queue Critical Error detected\n",
                                qtype);
                val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
                         PF_FW_ATQLEN_ATQCRIT_M);
                if (oldval != val)
                        wr32(hw, cq->sq.len, val);
        }

        event.buf_len = cq->rq_buf_size;
        event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
        if (!event.msg_buf)
                return 0;

        do {
                enum ice_status ret;
                u16 opcode;

                ret = ice_clean_rq_elem(hw, cq, &event, &pending);
                if (ret == ICE_ERR_AQ_NO_WORK)
                        break;
                if (ret) {
                        dev_err(dev, "%s Receive Queue event error %s\n", qtype,
                                ice_stat_str(ret));
                        break;
                }

                opcode = le16_to_cpu(event.desc.opcode);

                /* Notify any thread that might be waiting for this event */
                ice_aq_check_events(pf, opcode, &event);

                switch (opcode) {
                case ice_aqc_opc_get_link_status:
                        if (ice_handle_link_event(pf, &event))
                                dev_err(dev, "Could not handle link event\n");
                        break;
                case ice_aqc_opc_event_lan_overflow:
                        ice_vf_lan_overflow_event(pf, &event);
                        break;
                case ice_mbx_opc_send_msg_to_pf:
                        if (!ice_is_malicious_vf(pf, &event, i, pending))
                                ice_vc_process_vf_msg(pf, &event);
                        break;
                case ice_aqc_opc_fw_logging:
                        ice_output_fw_log(hw, &event.desc, event.msg_buf);
                        break;
                case ice_aqc_opc_lldp_set_mib_change:
                        ice_dcb_process_lldp_set_mib_change(pf, &event);
                        break;
                default:
                        dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
                                qtype, opcode);
                        break;
                }
        } while (pending && (i++ < ICE_DFLT_IRQ_WORK));

        kfree(event.msg_buf);

        return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        u16 ntu;

        ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
        return cq->rq.next_to_clean != ntu;
}
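
/* The three queue-cleaning subtasks below share one pattern: bail unless the
 * queue's EVENT_PENDING bit is set, clean the ring, then re-check the queue
 * head via ice_ctrlq_pending() to close the race window described in
 * ice_clean_adminq_subtask().
 */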

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;

        if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
                return;

        if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
                return;

        clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

        /* There might be a situation where new messages arrive to a control
         * queue between processing the last message and clearing the
         * EVENT_PENDING bit. So before exiting, check queue head again (using
         * ice_ctrlq_pending) and process new messages if any.
         */
        if (ice_ctrlq_pending(hw, &hw->adminq))
                __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

        ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;

        if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
                return;

        if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
                return;

        clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

        if (ice_ctrlq_pending(hw, &hw->mailboxq))
                __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

        ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;

        /* Nothing to do here if sideband queue is not supported */
        if (!ice_is_sbq_supported(hw)) {
                clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
                return;
        }

        if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
                return;

        if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
                return;

        clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

        if (ice_ctrlq_pending(hw, &hw->sbq))
                __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

        ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
        if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
            !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
            !test_bit(ICE_NEEDS_RESTART, pf->state))
                queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
        WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

        /* force memory (pf->state) to sync before next service task */
        smp_mb__before_atomic();
        clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
        int ret;

        ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

        if (pf->serv_tmr.function)
                del_timer_sync(&pf->serv_tmr);
        if (pf->serv_task.func)
                cancel_work_sync(&pf->serv_task);

        clear_bit(ICE_SERVICE_SCHED, pf->state);
        return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
        clear_bit(ICE_SERVICE_DIS, pf->state);
        ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
        struct ice_pf *pf = from_timer(pf, t, serv_tmr);

        mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
        ice_service_task_schedule(pf);
}
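
/* The service timer above only re-arms itself and queues pf->serv_task on
 * ice_wq; all of the subtasks in this file run from that work item in
 * process context.
 */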

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        unsigned int i;
        u32 reg;

        if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
                /* Since the VF MDD event logging is rate limited, check if
                 * there are pending MDD events.
                 */
                ice_print_vfs_mdd_events(pf);
                return;
        }

        /* find what triggered an MDD event */
        reg = rd32(hw, GL_MDET_TX_PQM);
        if (reg & GL_MDET_TX_PQM_VALID_M) {
                u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
                                GL_MDET_TX_PQM_PF_NUM_S;
                u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
                                GL_MDET_TX_PQM_VF_NUM_S;
                u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
                                GL_MDET_TX_PQM_MAL_TYPE_S;
                u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
                                GL_MDET_TX_PQM_QNUM_S);

                if (netif_msg_tx_err(pf))
                        dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
                                 event, queue, pf_num, vf_num);
                wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
        }

        reg = rd32(hw, GL_MDET_TX_TCLAN);
        if (reg & GL_MDET_TX_TCLAN_VALID_M) {
                u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
                                GL_MDET_TX_TCLAN_PF_NUM_S;
                u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
                                GL_MDET_TX_TCLAN_VF_NUM_S;
                u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
                                GL_MDET_TX_TCLAN_MAL_TYPE_S;
                u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
                                GL_MDET_TX_TCLAN_QNUM_S);

                if (netif_msg_tx_err(pf))
                        dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
                                 event, queue, pf_num, vf_num);
                wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
        }

        reg = rd32(hw, GL_MDET_RX);
        if (reg & GL_MDET_RX_VALID_M) {
                u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
                                GL_MDET_RX_PF_NUM_S;
                u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
                                GL_MDET_RX_VF_NUM_S;
                u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
                                GL_MDET_RX_MAL_TYPE_S;
                u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
                                GL_MDET_RX_QNUM_S);

                if (netif_msg_rx_err(pf))
                        dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
                                 event, queue, pf_num, vf_num);
                wr32(hw, GL_MDET_RX, 0xffffffff);
        }

        /* check to see if this PF caused an MDD event */
        reg = rd32(hw, PF_MDET_TX_PQM);
        if (reg & PF_MDET_TX_PQM_VALID_M) {
                wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
                if (netif_msg_tx_err(pf))
                        dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
        }

        reg = rd32(hw, PF_MDET_TX_TCLAN);
        if (reg & PF_MDET_TX_TCLAN_VALID_M) {
                wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
                if (netif_msg_tx_err(pf))
                        dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
        }

        reg = rd32(hw, PF_MDET_RX);
        if (reg & PF_MDET_RX_VALID_M) {
                wr32(hw, PF_MDET_RX, 0xFFFF);
                if (netif_msg_rx_err(pf))
                        dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
        }

        /* Check to see if one of the VFs caused an MDD event, and then
         * increment counters and set print pending
         */
        ice_for_each_vf(pf, i) {
                struct ice_vf *vf = &pf->vf[i];

                reg = rd32(hw, VP_MDET_TX_PQM(i));
                if (reg & VP_MDET_TX_PQM_VALID_M) {
                        wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
                        vf->mdd_tx_events.count++;
                        set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
                        if (netif_msg_tx_err(pf))
                                dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
                                         i);
                }

                reg = rd32(hw, VP_MDET_TX_TCLAN(i));
                if (reg & VP_MDET_TX_TCLAN_VALID_M) {
                        wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
                        vf->mdd_tx_events.count++;
                        set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
                        if (netif_msg_tx_err(pf))
                                dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
                                         i);
                }

                reg = rd32(hw, VP_MDET_TX_TDPU(i));
                if (reg & VP_MDET_TX_TDPU_VALID_M) {
                        wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
                        vf->mdd_tx_events.count++;
                        set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
                        if (netif_msg_tx_err(pf))
                                dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
                                         i);
                }

                reg = rd32(hw, VP_MDET_RX(i));
                if (reg & VP_MDET_RX_VALID_M) {
                        wr32(hw, VP_MDET_RX(i), 0xFFFF);
                        vf->mdd_rx_events.count++;
                        set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
                        if (netif_msg_rx_err(pf))
                                dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
                                         i);

                        /* Since the queue is disabled on VF Rx MDD events, the
                         * PF can be configured to reset the VF through ethtool
                         * private flag mdd-auto-reset-vf.
                         */
                        if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
                                /* VF MDD event counters will be cleared by
                                 * reset, so print the event prior to reset.
                                 */
                                ice_print_vf_rx_mdd_event(vf);
                                ice_reset_vf(&pf->vf[i], false);
                        }
                }
        }

        ice_print_vfs_mdd_events(pf);
}
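
/* Note on the MDD handling above: the global GL_MDET_* registers identify
 * the offending PF/VF and queue and are cleared by writing all ones, while
 * the per-function PF_MDET_* and VP_MDET_* registers only flag that this PF
 * (or a given VF) tripped an event and are cleared the same way.
 */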

/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes, a link event will be triggered because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
        struct ice_aqc_get_phy_caps_data *pcaps;
        struct ice_aqc_set_phy_cfg_data *cfg;
        struct ice_port_info *pi;
        struct device *dev;
        int retcode;

        if (!vsi || !vsi->port_info || !vsi->back)
                return -EINVAL;
        if (vsi->type != ICE_VSI_PF)
                return 0;

        dev = ice_pf_to_dev(vsi->back);

        pi = vsi->port_info;

        pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
        if (!pcaps)
                return -ENOMEM;

        retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
                                      NULL);
        if (retcode) {
                dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
                        vsi->vsi_num, retcode);
                retcode = -EIO;
                goto out;
        }

        /* No change in link */
        if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
            link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
                goto out;

        /* Use the current user PHY configuration. The current user PHY
         * configuration is initialized during probe from PHY capabilities
         * software mode, and updated on set PHY configuration.
         */
        cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
        if (!cfg) {
                retcode = -ENOMEM;
                goto out;
        }

        cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
        if (link_up)
                cfg->caps |= ICE_AQ_PHY_ENA_LINK;
        else
                cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

        retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
        if (retcode) {
                dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
                        vsi->vsi_num, retcode);
                retcode = -EIO;
        }

        kfree(cfg);
out:
        kfree(pcaps);
        return retcode;
}

/**
 * ice_init_nvm_phy_type - Initialize the NVM PHY type
 * @pi: port info structure
 *
 * Initialize nvm_phy_type_[low|high] for link lenient mode support
 */
static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
        struct ice_aqc_get_phy_caps_data *pcaps;
        struct ice_pf *pf = pi->hw->back;
        enum ice_status status;
        int err = 0;

        pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
        if (!pcaps)
                return -ENOMEM;

        status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
                                     NULL);
        if (status) {
                dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
                err = -EIO;
                goto out;
        }

        pf->nvm_phy_type_hi = pcaps->phy_type_high;
        pf->nvm_phy_type_lo = pcaps->phy_type_low;

out:
        kfree(pcaps);
        return err;
}

/**
 * ice_init_link_dflt_override - Initialize link default override
 * @pi: port info structure
 *
 * Initialize link default override and PHY total port shutdown during probe
 */
static void ice_init_link_dflt_override(struct ice_port_info *pi)
{
        struct ice_link_default_override_tlv *ldo;
        struct ice_pf *pf = pi->hw->back;

        ldo = &pf->link_dflt_override;
        if (ice_get_link_default_override(ldo, pi))
                return;

        if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
                return;

        /* Enable Total Port Shutdown (override/replace link-down-on-close
         * ethtool private flag) for ports with Port Disable bit set.
         */
        set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
        set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
}

/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY
 * is configured.
 *
 * This function should be called only if the FW doesn't support default
 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
        struct ice_link_default_override_tlv *ldo;
        struct ice_aqc_set_phy_cfg_data *cfg;
        struct ice_phy_info *phy = &pi->phy;
        struct ice_pf *pf = pi->hw->back;

        ldo = &pf->link_dflt_override;

        /* If link default override is enabled, use to mask NVM PHY capabilities
         * for speed and FEC default configuration.
         */
        cfg = &phy->curr_user_phy_cfg;

        if (ldo->phy_type_low || ldo->phy_type_high) {
                cfg->phy_type_low = pf->nvm_phy_type_lo &
                                    cpu_to_le64(ldo->phy_type_low);
                cfg->phy_type_high = pf->nvm_phy_type_hi &
                                     cpu_to_le64(ldo->phy_type_high);
        }
        cfg->link_fec_opt = ldo->fec_options;
        phy->curr_user_fec_req = ICE_FEC_AUTO;

        set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}
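
/* PHY init ordering: ice_init_nvm_phy_type() and ice_init_link_dflt_override()
 * run at probe time, ice_init_phy_user_cfg() runs once media is first
 * detected, and ice_configure_phy() later applies the resulting user
 * configuration when the PHY needs to be (re)configured.
 */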
If not, 1905 * initialize user PHY configuration with link override values 1906 */ 1907 if (!ice_fw_supports_report_dflt_cfg(pi->hw) && 1908 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { 1909 ice_init_phy_cfg_dflt_override(pi); 1910 goto out; 1911 } 1912 } 1913 1914 /* if link default override is not enabled, set user flow control and 1915 * FEC settings based on what get_phy_caps returned 1916 */ 1917 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, 1918 pcaps->link_fec_options); 1919 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); 1920 1921 out: 1922 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; 1923 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); 1924 err_out: 1925 kfree(pcaps); 1926 return err; 1927 } 1928 1929 /** 1930 * ice_configure_phy - configure PHY 1931 * @vsi: VSI of PHY 1932 * 1933 * Set the PHY configuration. If the current PHY configuration is the same as 1934 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise 1935 * configure based on the get PHY capabilities for topology with media. 1936 */ 1937 static int ice_configure_phy(struct ice_vsi *vsi) 1938 { 1939 struct device *dev = ice_pf_to_dev(vsi->back); 1940 struct ice_port_info *pi = vsi->port_info; 1941 struct ice_aqc_get_phy_caps_data *pcaps; 1942 struct ice_aqc_set_phy_cfg_data *cfg; 1943 struct ice_phy_info *phy = &pi->phy; 1944 struct ice_pf *pf = vsi->back; 1945 enum ice_status status; 1946 int err = 0; 1947 1948 /* Ensure we have media as we cannot configure a medialess port */ 1949 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 1950 return -EPERM; 1951 1952 ice_print_topo_conflict(vsi); 1953 1954 if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) 1955 return -EPERM; 1956 1957 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) 1958 return ice_force_phys_link_state(vsi, true); 1959 1960 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1961 if (!pcaps) 1962 return -ENOMEM; 1963 1964 /* Get current PHY config */ 1965 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 1966 NULL); 1967 if (status) { 1968 dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", 1969 vsi->vsi_num, ice_stat_str(status)); 1970 err = -EIO; 1971 goto done; 1972 } 1973 1974 /* If PHY enable link is configured and configuration has not changed, 1975 * there's nothing to do 1976 */ 1977 if (pcaps->caps & ICE_AQC_PHY_EN_LINK && 1978 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) 1979 goto done; 1980 1981 /* Use PHY topology as baseline for configuration */ 1982 memset(pcaps, 0, sizeof(*pcaps)); 1983 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 1984 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 1985 pcaps, NULL); 1986 else 1987 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 1988 pcaps, NULL); 1989 if (status) { 1990 dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n", 1991 vsi->vsi_num, ice_stat_str(status)); 1992 err = -EIO; 1993 goto done; 1994 } 1995 1996 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 1997 if (!cfg) { 1998 err = -ENOMEM; 1999 goto done; 2000 } 2001 2002 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); 2003 2004 /* Speed - If default override pending, use curr_user_phy_cfg set in 2005 * ice_init_phy_cfg_dflt_override.
2006 */ 2007 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, 2008 vsi->back->state)) { 2009 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; 2010 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; 2011 } else { 2012 u64 phy_low = 0, phy_high = 0; 2013 2014 ice_update_phy_type(&phy_low, &phy_high, 2015 pi->phy.curr_user_speed_req); 2016 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); 2017 cfg->phy_type_high = pcaps->phy_type_high & 2018 cpu_to_le64(phy_high); 2019 } 2020 2021 /* Can't provide what was requested; use PHY capabilities */ 2022 if (!cfg->phy_type_low && !cfg->phy_type_high) { 2023 cfg->phy_type_low = pcaps->phy_type_low; 2024 cfg->phy_type_high = pcaps->phy_type_high; 2025 } 2026 2027 /* FEC */ 2028 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); 2029 2030 /* Can't provide what was requested; use PHY capabilities */ 2031 if (cfg->link_fec_opt != 2032 (cfg->link_fec_opt & pcaps->link_fec_options)) { 2033 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 2034 cfg->link_fec_opt = pcaps->link_fec_options; 2035 } 2036 2037 /* Flow Control - always supported; no need to check against 2038 * capabilities 2039 */ 2040 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); 2041 2042 /* Enable link and link update */ 2043 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 2044 2045 status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); 2046 if (status) { 2047 dev_err(dev, "Failed to set phy config, VSI %d error %s\n", 2048 vsi->vsi_num, ice_stat_str(status)); 2049 err = -EIO; 2050 } 2051 2052 kfree(cfg); 2053 done: 2054 kfree(pcaps); 2055 return err; 2056 } 2057 2058 /** 2059 * ice_check_media_subtask - Check for media 2060 * @pf: pointer to PF struct 2061 * 2062 * If media is available, then initialize the PHY user configuration if it has 2063 * not been done, and configure the PHY if the interface is up. 2064 */ 2065 static void ice_check_media_subtask(struct ice_pf *pf) 2066 { 2067 struct ice_port_info *pi; 2068 struct ice_vsi *vsi; 2069 int err; 2070 2071 /* No need to check for media if it's already present */ 2072 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) 2073 return; 2074 2075 vsi = ice_get_main_vsi(pf); 2076 if (!vsi) 2077 return; 2078 2079 /* Refresh link info and check if media is present */ 2080 pi = vsi->port_info; 2081 err = ice_update_link_info(pi); 2082 if (err) 2083 return; 2084 2085 ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); 2086 2087 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 2088 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) 2089 ice_init_phy_user_cfg(pi); 2090 2091 /* PHY settings are reset on media insertion, reconfigure 2092 * PHY to preserve settings.
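 * The reconfiguration is skipped below while the interface is down with link-down-on-close enabled; in that case the PHY will be programmed the next time the interface is brought up.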
2093 */ 2094 if (test_bit(ICE_VSI_DOWN, vsi->state) && 2095 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 2096 return; 2097 2098 err = ice_configure_phy(vsi); 2099 if (!err) 2100 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 2101 2102 /* A Link Status Event will be generated; the event handler 2103 * will complete bringing the interface up 2104 */ 2105 } 2106 } 2107 2108 /** 2109 * ice_service_task - manage and run subtasks 2110 * @work: pointer to work_struct contained by the PF struct 2111 */ 2112 static void ice_service_task(struct work_struct *work) 2113 { 2114 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2115 unsigned long start_time = jiffies; 2116 2117 /* subtasks */ 2118 2119 /* process reset requests first */ 2120 ice_reset_subtask(pf); 2121 2122 /* bail if a reset/recovery cycle is pending or rebuild failed */ 2123 if (ice_is_reset_in_progress(pf->state) || 2124 test_bit(ICE_SUSPENDED, pf->state) || 2125 test_bit(ICE_NEEDS_RESTART, pf->state)) { 2126 ice_service_task_complete(pf); 2127 return; 2128 } 2129 2130 ice_clean_adminq_subtask(pf); 2131 ice_check_media_subtask(pf); 2132 ice_check_for_hang_subtask(pf); 2133 ice_sync_fltr_subtask(pf); 2134 ice_handle_mdd_event(pf); 2135 ice_watchdog_subtask(pf); 2136 2137 if (ice_is_safe_mode(pf)) { 2138 ice_service_task_complete(pf); 2139 return; 2140 } 2141 2142 ice_process_vflr_event(pf); 2143 ice_clean_mailboxq_subtask(pf); 2144 ice_clean_sbq_subtask(pf); 2145 ice_sync_arfs_fltrs(pf); 2146 ice_flush_fdir_ctx(pf); 2147 2148 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ 2149 ice_service_task_complete(pf); 2150 2151 /* If the tasks have taken longer than one service timer period 2152 * or there is more work to be done, reset the service timer to 2153 * schedule the service task now. 
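 * * For example, with the default serv_tmr_period of HZ, a pass that took more than one second, or one that left an event bit pending, re-arms the timer at the current jiffies value so the task runs again right away.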
2154 */ 2155 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 2156 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || 2157 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2158 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2159 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2160 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2161 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 2162 mod_timer(&pf->serv_tmr, jiffies); 2163 } 2164 2165 /** 2166 * ice_set_ctrlq_len - helper function to set controlq length 2167 * @hw: pointer to the HW instance 2168 */ 2169 static void ice_set_ctrlq_len(struct ice_hw *hw) 2170 { 2171 hw->adminq.num_rq_entries = ICE_AQ_LEN; 2172 hw->adminq.num_sq_entries = ICE_AQ_LEN; 2173 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 2174 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 2175 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; 2176 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2177 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2178 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2179 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2180 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2181 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2182 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2183 } 2184 2185 /** 2186 * ice_schedule_reset - schedule a reset 2187 * @pf: board private structure 2188 * @reset: reset being requested 2189 */ 2190 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 2191 { 2192 struct device *dev = ice_pf_to_dev(pf); 2193 2194 /* bail out if earlier reset has failed */ 2195 if (test_bit(ICE_RESET_FAILED, pf->state)) { 2196 dev_dbg(dev, "earlier reset has failed\n"); 2197 return -EIO; 2198 } 2199 /* bail if reset/recovery already in progress */ 2200 if (ice_is_reset_in_progress(pf->state)) { 2201 dev_dbg(dev, "Reset already in progress\n"); 2202 return -EBUSY; 2203 } 2204 2205 ice_unplug_aux_dev(pf); 2206 2207 switch (reset) { 2208 case ICE_RESET_PFR: 2209 set_bit(ICE_PFR_REQ, pf->state); 2210 break; 2211 case ICE_RESET_CORER: 2212 set_bit(ICE_CORER_REQ, pf->state); 2213 break; 2214 case ICE_RESET_GLOBR: 2215 set_bit(ICE_GLOBR_REQ, pf->state); 2216 break; 2217 default: 2218 return -EINVAL; 2219 } 2220 2221 ice_service_task_schedule(pf); 2222 return 0; 2223 } 2224 2225 /** 2226 * ice_irq_affinity_notify - Callback for affinity changes 2227 * @notify: context as to what irq was changed 2228 * @mask: the new affinity mask 2229 * 2230 * This is a callback function used by the irq_set_affinity_notifier function 2231 * so that we may register to receive changes to the irq affinity masks. 2232 */ 2233 static void 2234 ice_irq_affinity_notify(struct irq_affinity_notify *notify, 2235 const cpumask_t *mask) 2236 { 2237 struct ice_q_vector *q_vector = 2238 container_of(notify, struct ice_q_vector, affinity_notify); 2239 2240 cpumask_copy(&q_vector->affinity_mask, mask); 2241 } 2242 2243 /** 2244 * ice_irq_affinity_release - Callback for affinity notifier release 2245 * @ref: internal core kernel usage 2246 * 2247 * This is a callback function used by the irq_set_affinity_notifier function 2248 * to inform the current notification subscriber that they will no longer 2249 * receive notifications. 
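 * * The callback body below is intentionally empty: the notifier is embedded in struct ice_q_vector, so there is no separate allocation to free here.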
2250 */ 2251 static void ice_irq_affinity_release(struct kref __always_unused *ref) {} 2252 2253 /** 2254 * ice_vsi_ena_irq - Enable IRQ for the given VSI 2255 * @vsi: the VSI being configured 2256 */ 2257 static int ice_vsi_ena_irq(struct ice_vsi *vsi) 2258 { 2259 struct ice_hw *hw = &vsi->back->hw; 2260 int i; 2261 2262 ice_for_each_q_vector(vsi, i) 2263 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 2264 2265 ice_flush(hw); 2266 return 0; 2267 } 2268 2269 /** 2270 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 2271 * @vsi: the VSI being configured 2272 * @basename: name for the vector 2273 */ 2274 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 2275 { 2276 int q_vectors = vsi->num_q_vectors; 2277 struct ice_pf *pf = vsi->back; 2278 int base = vsi->base_vector; 2279 struct device *dev; 2280 int rx_int_idx = 0; 2281 int tx_int_idx = 0; 2282 int vector, err; 2283 int irq_num; 2284 2285 dev = ice_pf_to_dev(pf); 2286 for (vector = 0; vector < q_vectors; vector++) { 2287 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 2288 2289 irq_num = pf->msix_entries[base + vector].vector; 2290 2291 if (q_vector->tx.ring && q_vector->rx.ring) { 2292 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2293 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2294 tx_int_idx++; 2295 } else if (q_vector->rx.ring) { 2296 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2297 "%s-%s-%d", basename, "rx", rx_int_idx++); 2298 } else if (q_vector->tx.ring) { 2299 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2300 "%s-%s-%d", basename, "tx", tx_int_idx++); 2301 } else { 2302 /* skip this unused q_vector */ 2303 continue; 2304 } 2305 if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) 2306 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2307 IRQF_SHARED, q_vector->name, 2308 q_vector); 2309 else 2310 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2311 0, q_vector->name, q_vector); 2312 if (err) { 2313 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", 2314 err); 2315 goto free_q_irqs; 2316 } 2317 2318 /* register for affinity change notifications */ 2319 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { 2320 struct irq_affinity_notify *affinity_notify; 2321 2322 affinity_notify = &q_vector->affinity_notify; 2323 affinity_notify->notify = ice_irq_affinity_notify; 2324 affinity_notify->release = ice_irq_affinity_release; 2325 irq_set_affinity_notifier(irq_num, affinity_notify); 2326 } 2327 2328 /* assign the mask for this irq */ 2329 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 2330 } 2331 2332 vsi->irqs_ready = true; 2333 return 0; 2334 2335 free_q_irqs: 2336 while (vector) { 2337 vector--; 2338 irq_num = pf->msix_entries[base + vector].vector; 2339 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 2340 irq_set_affinity_notifier(irq_num, NULL); 2341 irq_set_affinity_hint(irq_num, NULL); 2342 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]); 2343 } 2344 return err; 2345 } 2346 2347 /** 2348 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP 2349 * @vsi: VSI to setup Tx rings used by XDP 2350 * 2351 * Return 0 on success and negative value on error 2352 */ 2353 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) 2354 { 2355 struct device *dev = ice_pf_to_dev(vsi->back); 2356 int i; 2357 2358 for (i = 0; i < vsi->num_xdp_txq; i++) { 2359 u16 xdp_q_idx = vsi->alloc_txq + i; 2360 struct ice_ring *xdp_ring; 2361 2362 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); 2363 2364 if (!xdp_ring) 2365 goto free_xdp_rings; 2366
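/* Initialize the new ring as an XDP Tx ring: it reuses the Tx queue map entry at vsi->alloc_txq + i, has no netdev Tx queue of its own, and is sized like a regular Tx ring. */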
2367 xdp_ring->q_index = xdp_q_idx; 2368 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; 2369 xdp_ring->ring_active = false; 2370 xdp_ring->vsi = vsi; 2371 xdp_ring->netdev = NULL; 2372 xdp_ring->dev = dev; 2373 xdp_ring->count = vsi->num_tx_desc; 2374 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); 2375 if (ice_setup_tx_ring(xdp_ring)) 2376 goto free_xdp_rings; 2377 ice_set_ring_xdp(xdp_ring); 2378 xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); 2379 } 2380 2381 return 0; 2382 2383 free_xdp_rings: 2384 for (; i >= 0; i--) 2385 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) 2386 ice_free_tx_ring(vsi->xdp_rings[i]); 2387 return -ENOMEM; 2388 } 2389 2390 /** 2391 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI 2392 * @vsi: VSI to set the bpf prog on 2393 * @prog: the bpf prog pointer 2394 */ 2395 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) 2396 { 2397 struct bpf_prog *old_prog; 2398 int i; 2399 2400 old_prog = xchg(&vsi->xdp_prog, prog); 2401 if (old_prog) 2402 bpf_prog_put(old_prog); 2403 2404 ice_for_each_rxq(vsi, i) 2405 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 2406 } 2407 2408 /** 2409 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2410 * @vsi: VSI to bring up Tx rings used by XDP 2411 * @prog: bpf program that will be assigned to VSI 2412 * 2413 * Return 0 on success and negative value on error 2414 */ 2415 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) 2416 { 2417 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2418 int xdp_rings_rem = vsi->num_xdp_txq; 2419 struct ice_pf *pf = vsi->back; 2420 struct ice_qs_cfg xdp_qs_cfg = { 2421 .qs_mutex = &pf->avail_q_mutex, 2422 .pf_map = pf->avail_txqs, 2423 .pf_map_size = pf->max_pf_txqs, 2424 .q_count = vsi->num_xdp_txq, 2425 .scatter_count = ICE_MAX_SCATTER_TXQS, 2426 .vsi_map = vsi->txq_map, 2427 .vsi_map_offset = vsi->alloc_txq, 2428 .mapping_mode = ICE_VSI_MAP_CONTIG 2429 }; 2430 enum ice_status status; 2431 struct device *dev; 2432 int i, v_idx; 2433 2434 dev = ice_pf_to_dev(pf); 2435 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, 2436 sizeof(*vsi->xdp_rings), GFP_KERNEL); 2437 if (!vsi->xdp_rings) 2438 return -ENOMEM; 2439 2440 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; 2441 if (__ice_vsi_get_qs(&xdp_qs_cfg)) 2442 goto err_map_xdp; 2443 2444 if (ice_xdp_alloc_setup_rings(vsi)) 2445 goto clear_xdp_rings; 2446 2447 /* follow the logic from ice_vsi_map_rings_to_vectors */ 2448 ice_for_each_q_vector(vsi, v_idx) { 2449 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2450 int xdp_rings_per_v, q_id, q_base; 2451 2452 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, 2453 vsi->num_q_vectors - v_idx); 2454 q_base = vsi->num_xdp_txq - xdp_rings_rem; 2455 2456 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { 2457 struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; 2458 2459 xdp_ring->q_vector = q_vector; 2460 xdp_ring->next = q_vector->tx.ring; 2461 q_vector->tx.ring = xdp_ring; 2462 } 2463 xdp_rings_rem -= xdp_rings_per_v; 2464 } 2465 2466 /* omit the scheduler update if in reset path; XDP queues will be 2467 * taken into account at the end of ice_vsi_rebuild, where 2468 * ice_cfg_vsi_lan is being called 2469 */ 2470 if (ice_is_reset_in_progress(pf->state)) 2471 return 0; 2472 2473 /* tell the Tx scheduler that right now we have 2474 * additional queues 2475 */ 2476 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2477 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; 2478 2479 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 
vsi->tc_cfg.ena_tc, 2480 max_txqs); 2481 if (status) { 2482 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", 2483 ice_stat_str(status)); 2484 goto clear_xdp_rings; 2485 } 2486 ice_vsi_assign_bpf_prog(vsi, prog); 2487 2488 return 0; 2489 clear_xdp_rings: 2490 for (i = 0; i < vsi->num_xdp_txq; i++) 2491 if (vsi->xdp_rings[i]) { 2492 kfree_rcu(vsi->xdp_rings[i], rcu); 2493 vsi->xdp_rings[i] = NULL; 2494 } 2495 2496 err_map_xdp: 2497 mutex_lock(&pf->avail_q_mutex); 2498 for (i = 0; i < vsi->num_xdp_txq; i++) { 2499 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2500 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2501 } 2502 mutex_unlock(&pf->avail_q_mutex); 2503 2504 devm_kfree(dev, vsi->xdp_rings); 2505 return -ENOMEM; 2506 } 2507 2508 /** 2509 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings 2510 * @vsi: VSI to remove XDP rings 2511 * 2512 * Detach XDP rings from irq vectors, clean up the PF bitmap and free 2513 * resources 2514 */ 2515 int ice_destroy_xdp_rings(struct ice_vsi *vsi) 2516 { 2517 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2518 struct ice_pf *pf = vsi->back; 2519 int i, v_idx; 2520 2521 /* q_vectors are freed in reset path so there's no point in detaching 2522 * rings; if the rebuild was not triggered by a reset, the bits 2523 * in pf->state won't be set, so additionally check the first q_vector 2524 * against NULL 2525 */ 2526 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2527 goto free_qmap; 2528 2529 ice_for_each_q_vector(vsi, v_idx) { 2530 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2531 struct ice_ring *ring; 2532 2533 ice_for_each_ring(ring, q_vector->tx) 2534 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2535 break; 2536 2537 /* restore the value of last node prior to XDP setup */ 2538 q_vector->tx.ring = ring; 2539 } 2540 2541 free_qmap: 2542 mutex_lock(&pf->avail_q_mutex); 2543 for (i = 0; i < vsi->num_xdp_txq; i++) { 2544 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2545 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2546 } 2547 mutex_unlock(&pf->avail_q_mutex); 2548 2549 for (i = 0; i < vsi->num_xdp_txq; i++) 2550 if (vsi->xdp_rings[i]) { 2551 if (vsi->xdp_rings[i]->desc) 2552 ice_free_tx_ring(vsi->xdp_rings[i]); 2553 kfree_rcu(vsi->xdp_rings[i], rcu); 2554 vsi->xdp_rings[i] = NULL; 2555 } 2556 2557 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); 2558 vsi->xdp_rings = NULL; 2559 2560 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2561 return 0; 2562 2563 ice_vsi_assign_bpf_prog(vsi, NULL); 2564 2565 /* notify Tx scheduler that we destroyed XDP queues and bring 2566 * back the old number of child nodes 2567 */ 2568 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2569 max_txqs[i] = vsi->num_txq; 2570 2571 /* change number of XDP Tx queues to 0 */ 2572 vsi->num_xdp_txq = 0; 2573 2574 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2575 max_txqs); 2576 } 2577 2578 /** 2579 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI 2580 * @vsi: VSI to schedule napi on 2581 */ 2582 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) 2583 { 2584 int i; 2585 2586 ice_for_each_rxq(vsi, i) { 2587 struct ice_ring *rx_ring = vsi->rx_rings[i]; 2588 2589 if (rx_ring->xsk_pool) 2590 napi_schedule(&rx_ring->q_vector->napi); 2591 } 2592 } 2593 2594 /** 2595 * ice_xdp_setup_prog - Add or remove XDP eBPF program 2596 * @vsi: VSI to setup XDP for 2597 * @prog: XDP program 2598 * @extack: netlink extended ack 2599 */ 2600 static int
2601 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, 2602 struct netlink_ext_ack *extack) 2603 { 2604 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; 2605 bool if_running = netif_running(vsi->netdev); 2606 int ret = 0, xdp_ring_err = 0; 2607 2608 if (frame_size > vsi->rx_buf_len) { 2609 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); 2610 return -EOPNOTSUPP; 2611 } 2612 2613 /* need to stop netdev while setting up the program for Rx rings */ 2614 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 2615 ret = ice_down(vsi); 2616 if (ret) { 2617 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); 2618 return ret; 2619 } 2620 } 2621 2622 if (!ice_is_xdp_ena_vsi(vsi) && prog) { 2623 vsi->num_xdp_txq = vsi->alloc_rxq; 2624 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); 2625 if (xdp_ring_err) 2626 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); 2627 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { 2628 xdp_ring_err = ice_destroy_xdp_rings(vsi); 2629 if (xdp_ring_err) 2630 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); 2631 } else { 2632 ice_vsi_assign_bpf_prog(vsi, prog); 2633 } 2634 2635 if (if_running) 2636 ret = ice_up(vsi); 2637 2638 if (!ret && prog) 2639 ice_vsi_rx_napi_schedule(vsi); 2640 2641 return (ret || xdp_ring_err) ? -ENOMEM : 0; 2642 } 2643 2644 /** 2645 * ice_xdp - implements XDP handler 2646 * @dev: netdevice 2647 * @xdp: XDP command 2648 */ 2649 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2650 { 2651 struct ice_netdev_priv *np = netdev_priv(dev); 2652 struct ice_vsi *vsi = np->vsi; 2653 2654 if (vsi->type != ICE_VSI_PF) { 2655 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); 2656 return -EINVAL; 2657 } 2658 2659 switch (xdp->command) { 2660 case XDP_SETUP_PROG: 2661 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); 2662 case XDP_SETUP_XSK_POOL: 2663 return ice_xsk_pool_setup(vsi, xdp->xsk.pool, 2664 xdp->xsk.queue_id); 2665 default: 2666 return -EINVAL; 2667 } 2668 } 2669 2670 /** 2671 * ice_ena_misc_vector - enable the non-queue interrupts 2672 * @pf: board private structure 2673 */ 2674 static void ice_ena_misc_vector(struct ice_pf *pf) 2675 { 2676 struct ice_hw *hw = &pf->hw; 2677 u32 val; 2678 2679 /* Disable anti-spoof detection interrupt to prevent spurious event 2680 * interrupts during a function reset. Anti-spoof functionality is 2681 * still supported.
2682 */ 2683 val = rd32(hw, GL_MDCK_TX_TDPU); 2684 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; 2685 wr32(hw, GL_MDCK_TX_TDPU, val); 2686 2687 /* clear things first */ 2688 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ 2689 rd32(hw, PFINT_OICR); /* read to clear */ 2690 2691 val = (PFINT_OICR_ECC_ERR_M | 2692 PFINT_OICR_MAL_DETECT_M | 2693 PFINT_OICR_GRST_M | 2694 PFINT_OICR_PCI_EXCEPTION_M | 2695 PFINT_OICR_VFLR_M | 2696 PFINT_OICR_HMC_ERR_M | 2697 PFINT_OICR_PE_PUSH_M | 2698 PFINT_OICR_PE_CRITERR_M); 2699 2700 wr32(hw, PFINT_OICR_ENA, val); 2701 2702 /* SW_ITR_IDX = 0, but don't change INTENA */ 2703 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), 2704 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); 2705 } 2706 2707 /** 2708 * ice_misc_intr - misc interrupt handler 2709 * @irq: interrupt number 2710 * @data: pointer to the PF structure 2711 */ 2712 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) 2713 { 2714 struct ice_pf *pf = (struct ice_pf *)data; 2715 struct ice_hw *hw = &pf->hw; 2716 irqreturn_t ret = IRQ_NONE; 2717 struct device *dev; 2718 u32 oicr, ena_mask; 2719 2720 dev = ice_pf_to_dev(pf); 2721 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 2722 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); 2723 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 2724 2725 oicr = rd32(hw, PFINT_OICR); 2726 ena_mask = rd32(hw, PFINT_OICR_ENA); 2727 2728 if (oicr & PFINT_OICR_SWINT_M) { 2729 ena_mask &= ~PFINT_OICR_SWINT_M; 2730 pf->sw_int_count++; 2731 } 2732 2733 if (oicr & PFINT_OICR_MAL_DETECT_M) { 2734 ena_mask &= ~PFINT_OICR_MAL_DETECT_M; 2735 set_bit(ICE_MDD_EVENT_PENDING, pf->state); 2736 } 2737 if (oicr & PFINT_OICR_VFLR_M) { 2738 /* disable any further VFLR event notifications */ 2739 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 2740 u32 reg = rd32(hw, PFINT_OICR_ENA); 2741 2742 reg &= ~PFINT_OICR_VFLR_M; 2743 wr32(hw, PFINT_OICR_ENA, reg); 2744 } else { 2745 ena_mask &= ~PFINT_OICR_VFLR_M; 2746 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); 2747 } 2748 } 2749 2750 if (oicr & PFINT_OICR_GRST_M) { 2751 u32 reset; 2752 2753 /* we have a reset warning */ 2754 ena_mask &= ~PFINT_OICR_GRST_M; 2755 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> 2756 GLGEN_RSTAT_RESET_TYPE_S; 2757 2758 if (reset == ICE_RESET_CORER) 2759 pf->corer_count++; 2760 else if (reset == ICE_RESET_GLOBR) 2761 pf->globr_count++; 2762 else if (reset == ICE_RESET_EMPR) 2763 pf->empr_count++; 2764 else 2765 dev_dbg(dev, "Invalid reset type %d\n", reset); 2766 2767 /* If a reset cycle isn't already in progress, we set a bit in 2768 * pf->state so that the service task can start a reset/rebuild. 2769 */ 2770 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { 2771 if (reset == ICE_RESET_CORER) 2772 set_bit(ICE_CORER_RECV, pf->state); 2773 else if (reset == ICE_RESET_GLOBR) 2774 set_bit(ICE_GLOBR_RECV, pf->state); 2775 else 2776 set_bit(ICE_EMPR_RECV, pf->state); 2777 2778 /* There are a couple of different bits at play here. 2779 * hw->reset_ongoing indicates whether the hardware is 2780 * in reset. This is set to true when a reset interrupt 2781 * is received and set back to false after the driver 2782 * has determined that the hardware is out of reset. 2783 * 2784 * ICE_RESET_OICR_RECV in pf->state indicates 2785 * that a post reset rebuild is required before the 2786 * driver is operational again. This is set above. 2787 * 2788 * As this is the start of the reset/rebuild cycle, set 2789 * both to indicate that.
2790 */ 2791 hw->reset_ongoing = true; 2792 } 2793 } 2794 2795 if (oicr & PFINT_OICR_TSYN_TX_M) { 2796 ena_mask &= ~PFINT_OICR_TSYN_TX_M; 2797 ice_ptp_process_ts(pf); 2798 } 2799 2800 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) 2801 if (oicr & ICE_AUX_CRIT_ERR) { 2802 struct iidc_event *event; 2803 2804 ena_mask &= ~ICE_AUX_CRIT_ERR; 2805 event = kzalloc(sizeof(*event), GFP_KERNEL); 2806 if (event) { 2807 set_bit(IIDC_EVENT_CRIT_ERR, event->type); 2808 /* report the entire OICR value to AUX driver */ 2809 event->reg = oicr; 2810 ice_send_event_to_aux(pf, event); 2811 kfree(event); 2812 } 2813 } 2814 2815 /* Report any remaining unexpected interrupts */ 2816 oicr &= ena_mask; 2817 if (oicr) { 2818 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); 2819 /* If a critical error is pending there is no choice but to 2820 * reset the device. 2821 */ 2822 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | 2823 PFINT_OICR_ECC_ERR_M)) { 2824 set_bit(ICE_PFR_REQ, pf->state); 2825 ice_service_task_schedule(pf); 2826 } 2827 } 2828 ret = IRQ_HANDLED; 2829 2830 ice_service_task_schedule(pf); 2831 ice_irq_dynamic_ena(hw, NULL, NULL); 2832 2833 return ret; 2834 } 2835 2836 /** 2837 * ice_dis_ctrlq_interrupts - disable control queue interrupts 2838 * @hw: pointer to HW structure 2839 */ 2840 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 2841 { 2842 /* disable Admin queue Interrupt causes */ 2843 wr32(hw, PFINT_FW_CTL, 2844 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 2845 2846 /* disable Mailbox queue Interrupt causes */ 2847 wr32(hw, PFINT_MBX_CTL, 2848 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 2849 2850 wr32(hw, PFINT_SB_CTL, 2851 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); 2852 2853 /* disable Control queue Interrupt causes */ 2854 wr32(hw, PFINT_OICR_CTL, 2855 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 2856 2857 ice_flush(hw); 2858 } 2859 2860 /** 2861 * ice_free_irq_msix_misc - Unroll misc vector setup 2862 * @pf: board private structure 2863 */ 2864 static void ice_free_irq_msix_misc(struct ice_pf *pf) 2865 { 2866 struct ice_hw *hw = &pf->hw; 2867 2868 ice_dis_ctrlq_interrupts(hw); 2869 2870 /* disable OICR interrupt */ 2871 wr32(hw, PFINT_OICR_ENA, 0); 2872 ice_flush(hw); 2873 2874 if (pf->msix_entries) { 2875 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); 2876 devm_free_irq(ice_pf_to_dev(pf), 2877 pf->msix_entries[pf->oicr_idx].vector, pf); 2878 } 2879 2880 pf->num_avail_sw_msix += 1; 2881 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); 2882 } 2883 2884 /** 2885 * ice_ena_ctrlq_interrupts - enable control queue interrupts 2886 * @hw: pointer to HW structure 2887 * @reg_idx: HW vector index to associate the control queue interrupts with 2888 */ 2889 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) 2890 { 2891 u32 val; 2892 2893 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 2894 PFINT_OICR_CTL_CAUSE_ENA_M); 2895 wr32(hw, PFINT_OICR_CTL, val); 2896 2897 /* enable Admin queue Interrupt causes */ 2898 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 2899 PFINT_FW_CTL_CAUSE_ENA_M); 2900 wr32(hw, PFINT_FW_CTL, val); 2901 2902 /* enable Mailbox queue Interrupt causes */ 2903 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 2904 PFINT_MBX_CTL_CAUSE_ENA_M); 2905 wr32(hw, PFINT_MBX_CTL, val); 2906 2907 /* This enables Sideband queue Interrupt causes */ 2908 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 2909 PFINT_SB_CTL_CAUSE_ENA_M); 2910 wr32(hw, PFINT_SB_CTL, 
val); 2911 2912 ice_flush(hw); 2913 } 2914 2915 /** 2916 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 2917 * @pf: board private structure 2918 * 2919 * This sets up the handler for MSIX 0, which is used to manage the 2920 * non-queue interrupts, e.g. AdminQ and errors. This is not used 2921 * when in MSI or Legacy interrupt mode. 2922 */ 2923 static int ice_req_irq_msix_misc(struct ice_pf *pf) 2924 { 2925 struct device *dev = ice_pf_to_dev(pf); 2926 struct ice_hw *hw = &pf->hw; 2927 int oicr_idx, err = 0; 2928 2929 if (!pf->int_name[0]) 2930 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 2931 dev_driver_string(dev), dev_name(dev)); 2932 2933 /* Do not request IRQ but do enable OICR interrupt since settings are 2934 * lost during reset. Note that this function is called only during 2935 * rebuild path and not while reset is in progress. 2936 */ 2937 if (ice_is_reset_in_progress(pf->state)) 2938 goto skip_req_irq; 2939 2940 /* reserve one vector in irq_tracker for misc interrupts */ 2941 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 2942 if (oicr_idx < 0) 2943 return oicr_idx; 2944 2945 pf->num_avail_sw_msix -= 1; 2946 pf->oicr_idx = (u16)oicr_idx; 2947 2948 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, 2949 ice_misc_intr, 0, pf->int_name, pf); 2950 if (err) { 2951 dev_err(dev, "devm_request_irq for %s failed: %d\n", 2952 pf->int_name, err); 2953 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 2954 pf->num_avail_sw_msix += 1; 2955 return err; 2956 } 2957 2958 skip_req_irq: 2959 ice_ena_misc_vector(pf); 2960 2961 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); 2962 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), 2963 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 2964 2965 ice_flush(hw); 2966 ice_irq_dynamic_ena(hw, NULL, NULL); 2967 2968 return 0; 2969 } 2970 2971 /** 2972 * ice_napi_add - register NAPI handler for the VSI 2973 * @vsi: VSI for which NAPI handler is to be registered 2974 * 2975 * This function is only called in the driver's load path. Registering the NAPI 2976 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 2977 * reset/rebuild, etc.) 
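 * The NAPI instances registered here are removed again via ice_napi_del() in the unroll path of ice_setup_pf_sw().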
2978 */ 2979 static void ice_napi_add(struct ice_vsi *vsi) 2980 { 2981 int v_idx; 2982 2983 if (!vsi->netdev) 2984 return; 2985 2986 ice_for_each_q_vector(vsi, v_idx) 2987 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, 2988 ice_napi_poll, NAPI_POLL_WEIGHT); 2989 } 2990 2991 /** 2992 * ice_set_ops - set netdev and ethtool ops for the given netdev 2993 * @netdev: netdev instance 2994 */ 2995 static void ice_set_ops(struct net_device *netdev) 2996 { 2997 struct ice_pf *pf = ice_netdev_to_pf(netdev); 2998 2999 if (ice_is_safe_mode(pf)) { 3000 netdev->netdev_ops = &ice_netdev_safe_mode_ops; 3001 ice_set_ethtool_safe_mode_ops(netdev); 3002 return; 3003 } 3004 3005 netdev->netdev_ops = &ice_netdev_ops; 3006 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; 3007 ice_set_ethtool_ops(netdev); 3008 } 3009 3010 /** 3011 * ice_set_netdev_features - set features for the given netdev 3012 * @netdev: netdev instance 3013 */ 3014 static void ice_set_netdev_features(struct net_device *netdev) 3015 { 3016 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3017 netdev_features_t csumo_features; 3018 netdev_features_t vlano_features; 3019 netdev_features_t dflt_features; 3020 netdev_features_t tso_features; 3021 3022 if (ice_is_safe_mode(pf)) { 3023 /* safe mode */ 3024 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 3025 netdev->hw_features = netdev->features; 3026 return; 3027 } 3028 3029 dflt_features = NETIF_F_SG | 3030 NETIF_F_HIGHDMA | 3031 NETIF_F_NTUPLE | 3032 NETIF_F_RXHASH; 3033 3034 csumo_features = NETIF_F_RXCSUM | 3035 NETIF_F_IP_CSUM | 3036 NETIF_F_SCTP_CRC | 3037 NETIF_F_IPV6_CSUM; 3038 3039 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 3040 NETIF_F_HW_VLAN_CTAG_TX | 3041 NETIF_F_HW_VLAN_CTAG_RX; 3042 3043 tso_features = NETIF_F_TSO | 3044 NETIF_F_TSO_ECN | 3045 NETIF_F_TSO6 | 3046 NETIF_F_GSO_GRE | 3047 NETIF_F_GSO_UDP_TUNNEL | 3048 NETIF_F_GSO_GRE_CSUM | 3049 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3050 NETIF_F_GSO_PARTIAL | 3051 NETIF_F_GSO_IPXIP4 | 3052 NETIF_F_GSO_IPXIP6 | 3053 NETIF_F_GSO_UDP_L4; 3054 3055 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | 3056 NETIF_F_GSO_GRE_CSUM; 3057 /* set features that user can change */ 3058 netdev->hw_features = dflt_features | csumo_features | 3059 vlano_features | tso_features; 3060 3061 /* add support for HW_CSUM on packets with MPLS header */ 3062 netdev->mpls_features = NETIF_F_HW_CSUM; 3063 3064 /* enable features */ 3065 netdev->features |= netdev->hw_features; 3066 /* encap and VLAN devices inherit default, csumo and tso features */ 3067 netdev->hw_enc_features |= dflt_features | csumo_features | 3068 tso_features; 3069 netdev->vlan_features |= dflt_features | csumo_features | 3070 tso_features; 3071 } 3072 3073 /** 3074 * ice_cfg_netdev - Allocate, configure and register a netdev 3075 * @vsi: the VSI associated with the new netdev 3076 * 3077 * Returns 0 on success, negative value on failure 3078 */ 3079 static int ice_cfg_netdev(struct ice_vsi *vsi) 3080 { 3081 struct ice_netdev_priv *np; 3082 struct net_device *netdev; 3083 u8 mac_addr[ETH_ALEN]; 3084 3085 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 3086 vsi->alloc_rxq); 3087 if (!netdev) 3088 return -ENOMEM; 3089 3090 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3091 vsi->netdev = netdev; 3092 np = netdev_priv(netdev); 3093 np->vsi = vsi; 3094 3095 ice_set_netdev_features(netdev); 3096 3097 ice_set_ops(netdev); 3098 3099 if (vsi->type == ICE_VSI_PF) { 3100 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); 3101 ether_addr_copy(mac_addr,
vsi->port_info->mac.perm_addr); 3102 ether_addr_copy(netdev->dev_addr, mac_addr); 3103 ether_addr_copy(netdev->perm_addr, mac_addr); 3104 } 3105 3106 netdev->priv_flags |= IFF_UNICAST_FLT; 3107 3108 /* Setup netdev TC information */ 3109 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 3110 3111 /* setup watchdog timeout value to be 5 second */ 3112 netdev->watchdog_timeo = 5 * HZ; 3113 3114 netdev->min_mtu = ETH_MIN_MTU; 3115 netdev->max_mtu = ICE_MAX_MTU; 3116 3117 return 0; 3118 } 3119 3120 /** 3121 * ice_fill_rss_lut - Fill the RSS lookup table with default values 3122 * @lut: Lookup table 3123 * @rss_table_size: Lookup table size 3124 * @rss_size: Range of queue number for hashing 3125 */ 3126 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 3127 { 3128 u16 i; 3129 3130 for (i = 0; i < rss_table_size; i++) 3131 lut[i] = i % rss_size; 3132 } 3133 3134 /** 3135 * ice_pf_vsi_setup - Set up a PF VSI 3136 * @pf: board private structure 3137 * @pi: pointer to the port_info instance 3138 * 3139 * Returns pointer to the successfully allocated VSI software struct 3140 * on success, otherwise returns NULL on failure. 3141 */ 3142 static struct ice_vsi * 3143 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3144 { 3145 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); 3146 } 3147 3148 /** 3149 * ice_ctrl_vsi_setup - Set up a control VSI 3150 * @pf: board private structure 3151 * @pi: pointer to the port_info instance 3152 * 3153 * Returns pointer to the successfully allocated VSI software struct 3154 * on success, otherwise returns NULL on failure. 3155 */ 3156 static struct ice_vsi * 3157 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3158 { 3159 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); 3160 } 3161 3162 /** 3163 * ice_lb_vsi_setup - Set up a loopback VSI 3164 * @pf: board private structure 3165 * @pi: pointer to the port_info instance 3166 * 3167 * Returns pointer to the successfully allocated VSI software struct 3168 * on success, otherwise returns NULL on failure. 
3169 */ 3170 struct ice_vsi * 3171 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3172 { 3173 return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID); 3174 } 3175 3176 /** 3177 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 3178 * @netdev: network interface to be adjusted 3179 * @proto: unused protocol 3180 * @vid: VLAN ID to be added 3181 * 3182 * net_device_ops implementation for adding VLAN IDs 3183 */ 3184 static int 3185 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, 3186 u16 vid) 3187 { 3188 struct ice_netdev_priv *np = netdev_priv(netdev); 3189 struct ice_vsi *vsi = np->vsi; 3190 int ret; 3191 3192 /* VLAN 0 is added by default during load/reset */ 3193 if (!vid) 3194 return 0; 3195 3196 /* Enable VLAN pruning when a VLAN other than 0 is added */ 3197 if (!ice_vsi_is_vlan_pruning_ena(vsi)) { 3198 ret = ice_cfg_vlan_pruning(vsi, true, false); 3199 if (ret) 3200 return ret; 3201 } 3202 3203 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged 3204 * packets aren't pruned by the device's internal switch on Rx 3205 */ 3206 ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); 3207 if (!ret) 3208 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 3209 3210 return ret; 3211 } 3212 3213 /** 3214 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload 3215 * @netdev: network interface to be adjusted 3216 * @proto: unused protocol 3217 * @vid: VLAN ID to be removed 3218 * 3219 * net_device_ops implementation for removing VLAN IDs 3220 */ 3221 static int 3222 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, 3223 u16 vid) 3224 { 3225 struct ice_netdev_priv *np = netdev_priv(netdev); 3226 struct ice_vsi *vsi = np->vsi; 3227 int ret; 3228 3229 /* don't allow removal of VLAN 0 */ 3230 if (!vid) 3231 return 0; 3232 3233 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN 3234 * information 3235 */ 3236 ret = ice_vsi_kill_vlan(vsi, vid); 3237 if (ret) 3238 return ret; 3239 3240 /* Disable pruning when VLAN 0 is the only VLAN rule */ 3241 if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) 3242 ret = ice_cfg_vlan_pruning(vsi, false, false); 3243 3244 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 3245 return ret; 3246 } 3247 3248 /** 3249 * ice_setup_pf_sw - Setup the HW switch on startup or after reset 3250 * @pf: board private structure 3251 * 3252 * Returns 0 on success, negative value on failure 3253 */ 3254 static int ice_setup_pf_sw(struct ice_pf *pf) 3255 { 3256 struct ice_vsi *vsi; 3257 int status = 0; 3258 3259 if (ice_is_reset_in_progress(pf->state)) 3260 return -EBUSY; 3261 3262 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 3263 if (!vsi) 3264 return -ENOMEM; 3265 3266 status = ice_cfg_netdev(vsi); 3267 if (status) { 3268 status = -ENODEV; 3269 goto unroll_vsi_setup; 3270 } 3271 /* netdev has to be configured before setting frame size */ 3272 ice_vsi_cfg_frame_size(vsi); 3273 3274 /* Setup DCB netlink interface */ 3275 ice_dcbnl_setup(vsi); 3276 3277 /* registering the NAPI handler requires both the queues and 3278 * netdev to be created, which are done in ice_pf_vsi_setup() 3279 * and ice_cfg_netdev() respectively 3280 */ 3281 ice_napi_add(vsi); 3282 3283 status = ice_set_cpu_rx_rmap(vsi); 3284 if (status) { 3285 dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", 3286 vsi->vsi_num, status); 3287 status = -EINVAL; 3288 goto unroll_napi_add; 3289 } 3290 status = ice_init_mac_fltr(pf); 3291 if (status) 3292 goto free_cpu_rx_map; 3293 3294 
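/* success path: PF VSI, netdev, DCB netlink hooks, NAPI handlers, CPU Rx rmap and the default MAC filters are all in place; status is 0 here */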
return status; 3295 3296 free_cpu_rx_map: 3297 ice_free_cpu_rx_rmap(vsi); 3298 3299 unroll_napi_add: 3300 if (vsi) { 3301 ice_napi_del(vsi); 3302 if (vsi->netdev) { 3303 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3304 free_netdev(vsi->netdev); 3305 vsi->netdev = NULL; 3306 } 3307 } 3308 3309 unroll_vsi_setup: 3310 ice_vsi_release(vsi); 3311 return status; 3312 } 3313 3314 /** 3315 * ice_get_avail_q_count - Get count of queues not in use 3316 * @pf_qmap: bitmap to get queue use count from 3317 * @lock: pointer to a mutex that protects access to pf_qmap 3318 * @size: size of the bitmap 3319 */ 3320 static u16 3321 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 3322 { 3323 unsigned long bit; 3324 u16 count = 0; 3325 3326 mutex_lock(lock); 3327 for_each_clear_bit(bit, pf_qmap, size) 3328 count++; 3329 mutex_unlock(lock); 3330 3331 return count; 3332 } 3333 3334 /** 3335 * ice_get_avail_txq_count - Get count of Tx queues not in use 3336 * @pf: pointer to an ice_pf instance 3337 */ 3338 u16 ice_get_avail_txq_count(struct ice_pf *pf) 3339 { 3340 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 3341 pf->max_pf_txqs); 3342 } 3343 3344 /** 3345 * ice_get_avail_rxq_count - Get count of Rx queues not in use 3346 * @pf: pointer to an ice_pf instance 3347 */ 3348 u16 ice_get_avail_rxq_count(struct ice_pf *pf) 3349 { 3350 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 3351 pf->max_pf_rxqs); 3352 } 3353 3354 /** 3355 * ice_deinit_pf - Unroll initializations done by ice_init_pf 3356 * @pf: board private structure to de-initialize 3357 */ 3358 static void ice_deinit_pf(struct ice_pf *pf) 3359 { 3360 ice_service_task_stop(pf); 3361 mutex_destroy(&pf->sw_mutex); 3362 mutex_destroy(&pf->tc_mutex); 3363 mutex_destroy(&pf->avail_q_mutex); 3364 3365 if (pf->avail_txqs) { 3366 bitmap_free(pf->avail_txqs); 3367 pf->avail_txqs = NULL; 3368 } 3369 3370 if (pf->avail_rxqs) { 3371 bitmap_free(pf->avail_rxqs); 3372 pf->avail_rxqs = NULL; 3373 } 3374 3375 if (pf->ptp.clock) 3376 ptp_clock_unregister(pf->ptp.clock); 3377 } 3378 3379 /** 3380 * ice_set_pf_caps - set PF's capability flags 3381 * @pf: pointer to the PF instance 3382 */ 3383 static void ice_set_pf_caps(struct ice_pf *pf) 3384 { 3385 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 3386 3387 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3388 clear_bit(ICE_FLAG_AUX_ENA, pf->flags); 3389 if (func_caps->common_cap.rdma) { 3390 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3391 set_bit(ICE_FLAG_AUX_ENA, pf->flags); 3392 } 3393 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3394 if (func_caps->common_cap.dcb) 3395 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3396 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3397 if (func_caps->common_cap.sr_iov_1_1) { 3398 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3399 pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, 3400 ICE_MAX_VF_COUNT); 3401 } 3402 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 3403 if (func_caps->common_cap.rss_table_size) 3404 set_bit(ICE_FLAG_RSS_ENA, pf->flags); 3405 3406 clear_bit(ICE_FLAG_FD_ENA, pf->flags); 3407 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 3408 u16 unused; 3409 3410 /* ctrl_vsi_idx will be set to a valid value when flow director 3411 * is set up by ice_init_fdir 3412 */ 3413 pf->ctrl_vsi_idx = ICE_NO_VSI; 3414 set_bit(ICE_FLAG_FD_ENA, pf->flags); 3415 /* force guaranteed filter pool for PF */ 3416 ice_alloc_fd_guar_item(&pf->hw, &unused, 3417 func_caps->fd_fltr_guar); 3418 /* force shared filter pool for PF */
3419 ice_alloc_fd_shrd_item(&pf->hw, &unused, 3420 func_caps->fd_fltr_best_effort); 3421 } 3422 3423 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3424 if (func_caps->common_cap.ieee_1588) 3425 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3426 3427 pf->max_pf_txqs = func_caps->common_cap.num_txq; 3428 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 3429 } 3430 3431 /** 3432 * ice_init_pf - Initialize general software structures (struct ice_pf) 3433 * @pf: board private structure to initialize 3434 */ 3435 static int ice_init_pf(struct ice_pf *pf) 3436 { 3437 ice_set_pf_caps(pf); 3438 3439 mutex_init(&pf->sw_mutex); 3440 mutex_init(&pf->tc_mutex); 3441 3442 INIT_HLIST_HEAD(&pf->aq_wait_list); 3443 spin_lock_init(&pf->aq_wait_lock); 3444 init_waitqueue_head(&pf->aq_wait_queue); 3445 3446 init_waitqueue_head(&pf->reset_wait_queue); 3447 3448 /* setup service timer and periodic service task */ 3449 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 3450 pf->serv_tmr_period = HZ; 3451 INIT_WORK(&pf->serv_task, ice_service_task); 3452 clear_bit(ICE_SERVICE_SCHED, pf->state); 3453 3454 mutex_init(&pf->avail_q_mutex); 3455 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 3456 if (!pf->avail_txqs) 3457 return -ENOMEM; 3458 3459 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 3460 if (!pf->avail_rxqs) { 3461 devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); 3462 pf->avail_txqs = NULL; 3463 return -ENOMEM; 3464 } 3465 3466 return 0; 3467 } 3468 3469 /** 3470 * ice_ena_msix_range - Request a range of MSIX vectors from the OS 3471 * @pf: board private structure 3472 * 3473 * compute the number of MSIX vectors required (v_budget) and request from 3474 * the OS. Return the number of vectors reserved or negative on failure 3475 */ 3476 static int ice_ena_msix_range(struct ice_pf *pf) 3477 { 3478 int num_cpus, v_left, v_actual, v_other, v_budget = 0; 3479 struct device *dev = ice_pf_to_dev(pf); 3480 int needed, err, i; 3481 3482 v_left = pf->hw.func_caps.common_cap.num_msix_vectors; 3483 num_cpus = num_online_cpus(); 3484 3485 /* reserve for LAN miscellaneous handler */ 3486 needed = ICE_MIN_LAN_OICR_MSIX; 3487 if (v_left < needed) 3488 goto no_hw_vecs_left_err; 3489 v_budget += needed; 3490 v_left -= needed; 3491 3492 /* reserve for flow director */ 3493 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 3494 needed = ICE_FDIR_MSIX; 3495 if (v_left < needed) 3496 goto no_hw_vecs_left_err; 3497 v_budget += needed; 3498 v_left -= needed; 3499 } 3500 3501 /* total used for non-traffic vectors */ 3502 v_other = v_budget; 3503 3504 /* reserve vectors for LAN traffic */ 3505 needed = num_cpus; 3506 if (v_left < needed) 3507 goto no_hw_vecs_left_err; 3508 pf->num_lan_msix = needed; 3509 v_budget += needed; 3510 v_left -= needed; 3511 3512 /* reserve vectors for RDMA auxiliary driver */ 3513 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { 3514 needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; 3515 if (v_left < needed) 3516 goto no_hw_vecs_left_err; 3517 pf->num_rdma_msix = needed; 3518 v_budget += needed; 3519 v_left -= needed; 3520 } 3521 3522 pf->msix_entries = devm_kcalloc(dev, v_budget, 3523 sizeof(*pf->msix_entries), GFP_KERNEL); 3524 if (!pf->msix_entries) { 3525 err = -ENOMEM; 3526 goto exit_err; 3527 } 3528 3529 for (i = 0; i < v_budget; i++) 3530 pf->msix_entries[i].entry = i; 3531 3532 /* actually reserve the vectors */ 3533 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, 3534 ICE_MIN_MSIX, v_budget); 3535 if (v_actual < 0) { 3536 dev_err(dev, "unable to reserve MSI-X vectors\n"); 3537 
err = v_actual; 3538 goto msix_err; 3539 } 3540 3541 if (v_actual < v_budget) { 3542 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", 3543 v_budget, v_actual); 3544 3545 if (v_actual < ICE_MIN_MSIX) { 3546 /* error if we can't get minimum vectors */ 3547 pci_disable_msix(pf->pdev); 3548 err = -ERANGE; 3549 goto msix_err; 3550 } else { 3551 int v_remain = v_actual - v_other; 3552 int v_rdma = 0, v_min_rdma = 0; 3553 3554 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { 3555 /* Need at least 1 interrupt in addition to 3556 * AEQ MSIX 3557 */ 3558 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; 3559 v_min_rdma = ICE_MIN_RDMA_MSIX; 3560 } 3561 3562 if (v_actual == ICE_MIN_MSIX || 3563 v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { 3564 dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); 3565 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3566 3567 pf->num_rdma_msix = 0; 3568 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; 3569 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || 3570 (v_remain - v_rdma < v_rdma)) { 3571 /* Support minimum RDMA and give remaining 3572 * vectors to LAN MSIX 3573 */ 3574 pf->num_rdma_msix = v_min_rdma; 3575 pf->num_lan_msix = v_remain - v_min_rdma; 3576 } else { 3577 /* Split remaining MSIX with RDMA after 3578 * accounting for AEQ MSIX 3579 */ 3580 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + 3581 ICE_RDMA_NUM_AEQ_MSIX; 3582 pf->num_lan_msix = v_remain - pf->num_rdma_msix; 3583 } 3584 3585 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", 3586 pf->num_lan_msix); 3587 3588 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 3589 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", 3590 pf->num_rdma_msix); 3591 } 3592 } 3593 3594 return v_actual; 3595 3596 msix_err: 3597 devm_kfree(dev, pf->msix_entries); 3598 goto exit_err; 3599 3600 no_hw_vecs_left_err: 3601 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", 3602 needed, v_left); 3603 err = -ERANGE; 3604 exit_err: 3605 pf->num_rdma_msix = 0; 3606 pf->num_lan_msix = 0; 3607 return err; 3608 } 3609 3610 /** 3611 * ice_dis_msix - Disable MSI-X interrupt setup in OS 3612 * @pf: board private structure 3613 */ 3614 static void ice_dis_msix(struct ice_pf *pf) 3615 { 3616 pci_disable_msix(pf->pdev); 3617 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); 3618 pf->msix_entries = NULL; 3619 } 3620 3621 /** 3622 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme 3623 * @pf: board private structure 3624 */ 3625 static void ice_clear_interrupt_scheme(struct ice_pf *pf) 3626 { 3627 ice_dis_msix(pf); 3628 3629 if (pf->irq_tracker) { 3630 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); 3631 pf->irq_tracker = NULL; 3632 } 3633 } 3634 3635 /** 3636 * ice_init_interrupt_scheme - Determine proper interrupt scheme 3637 * @pf: board private structure to initialize 3638 */ 3639 static int ice_init_interrupt_scheme(struct ice_pf *pf) 3640 { 3641 int vectors; 3642 3643 vectors = ice_ena_msix_range(pf); 3644 3645 if (vectors < 0) 3646 return vectors; 3647 3648 /* set up vector assignment tracking */ 3649 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), 3650 struct_size(pf->irq_tracker, list, vectors), 3651 GFP_KERNEL); 3652 if (!pf->irq_tracker) { 3653 ice_dis_msix(pf); 3654 return -ENOMEM; 3655 } 3656 3657 /* populate SW interrupts pool with number of OS granted IRQs. 
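 * These entries are later handed out through ice_get_res(), e.g. for the single misc/OICR vector reserved in ice_req_irq_msix_misc().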
*/ 3658 pf->num_avail_sw_msix = (u16)vectors; 3659 pf->irq_tracker->num_entries = (u16)vectors; 3660 pf->irq_tracker->end = pf->irq_tracker->num_entries; 3661 3662 return 0; 3663 } 3664 3665 /** 3666 * ice_is_wol_supported - check if WoL is supported 3667 * @hw: pointer to hardware info 3668 * 3669 * Check if WoL is supported based on the HW configuration. 3670 * Returns true if NVM supports and enables WoL for this port, false otherwise 3671 */ 3672 bool ice_is_wol_supported(struct ice_hw *hw) 3673 { 3674 u16 wol_ctrl; 3675 3676 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 3677 * word) indicates WoL is not supported on the corresponding PF ID. 3678 */ 3679 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 3680 return false; 3681 3682 return !(BIT(hw->port_info->lport) & wol_ctrl); 3683 } 3684 3685 /** 3686 * ice_vsi_recfg_qs - Change the number of queues on a VSI 3687 * @vsi: VSI being changed 3688 * @new_rx: new number of Rx queues 3689 * @new_tx: new number of Tx queues 3690 * 3691 * Only change the number of queues if new_tx or new_rx is non-zero. 3692 * 3693 * Returns 0 on success. 3694 */ 3695 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) 3696 { 3697 struct ice_pf *pf = vsi->back; 3698 int err = 0, timeout = 50; 3699 3700 if (!new_rx && !new_tx) 3701 return -EINVAL; 3702 3703 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 3704 timeout--; 3705 if (!timeout) 3706 return -EBUSY; 3707 usleep_range(1000, 2000); 3708 } 3709 3710 if (new_tx) 3711 vsi->req_txq = (u16)new_tx; 3712 if (new_rx) 3713 vsi->req_rxq = (u16)new_rx; 3714 3715 /* set for the next time the netdev is started */ 3716 if (!netif_running(vsi->netdev)) { 3717 ice_vsi_rebuild(vsi, false); 3718 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 3719 goto done; 3720 } 3721 3722 ice_vsi_close(vsi); 3723 ice_vsi_rebuild(vsi, false); 3724 ice_pf_dcb_recfg(pf); 3725 ice_vsi_open(vsi); 3726 done: 3727 clear_bit(ICE_CFG_BUSY, pf->state); 3728 return err; 3729 } 3730 3731 /** 3732 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode 3733 * @pf: PF to configure 3734 * 3735 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF 3736 * VSI can still Tx/Rx VLAN tagged packets.
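 * This is done by clearing VLAN anti-spoof and Rx VLAN pruning in the VSI context and selecting ICE_AQ_VSI_VLAN_MODE_ALL with EMOD_NOTHING, i.e. accept all VLAN IDs on Tx and leave tags untouched on Rx.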
3737 */ 3738 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 3739 { 3740 struct ice_vsi *vsi = ice_get_main_vsi(pf); 3741 struct ice_vsi_ctx *ctxt; 3742 enum ice_status status; 3743 struct ice_hw *hw; 3744 3745 if (!vsi) 3746 return; 3747 3748 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 3749 if (!ctxt) 3750 return; 3751 3752 hw = &pf->hw; 3753 ctxt->info = vsi->info; 3754 3755 ctxt->info.valid_sections = 3756 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 3757 ICE_AQ_VSI_PROP_SECURITY_VALID | 3758 ICE_AQ_VSI_PROP_SW_VALID); 3759 3760 /* disable VLAN anti-spoof */ 3761 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 3762 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 3763 3764 /* disable VLAN pruning and keep all other settings */ 3765 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 3766 3767 /* allow all VLANs on Tx and don't strip on Rx */ 3768 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | 3769 ICE_AQ_VSI_VLAN_EMOD_NOTHING; 3770 3771 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 3772 if (status) { 3773 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", 3774 ice_stat_str(status), 3775 ice_aq_str(hw->adminq.sq_last_status)); 3776 } else { 3777 vsi->info.sec_flags = ctxt->info.sec_flags; 3778 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 3779 vsi->info.vlan_flags = ctxt->info.vlan_flags; 3780 } 3781 3782 kfree(ctxt); 3783 } 3784 3785 /** 3786 * ice_log_pkg_init - log result of DDP package load 3787 * @hw: pointer to hardware info 3788 * @status: status of package load 3789 */ 3790 static void 3791 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) 3792 { 3793 struct ice_pf *pf = (struct ice_pf *)hw->back; 3794 struct device *dev = ice_pf_to_dev(pf); 3795 3796 switch (*status) { 3797 case ICE_SUCCESS: 3798 /* The package download AdminQ command returned success because 3799 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is 3800 * already a package loaded on the device. 3801 */ 3802 if (hw->pkg_ver.major == hw->active_pkg_ver.major && 3803 hw->pkg_ver.minor == hw->active_pkg_ver.minor && 3804 hw->pkg_ver.update == hw->active_pkg_ver.update && 3805 hw->pkg_ver.draft == hw->active_pkg_ver.draft && 3806 !memcmp(hw->pkg_name, hw->active_pkg_name, 3807 sizeof(hw->pkg_name))) { 3808 if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) 3809 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 3810 hw->active_pkg_name, 3811 hw->active_pkg_ver.major, 3812 hw->active_pkg_ver.minor, 3813 hw->active_pkg_ver.update, 3814 hw->active_pkg_ver.draft); 3815 else 3816 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 3817 hw->active_pkg_name, 3818 hw->active_pkg_ver.major, 3819 hw->active_pkg_ver.minor, 3820 hw->active_pkg_ver.update, 3821 hw->active_pkg_ver.draft); 3822 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || 3823 hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { 3824 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. 
Entering Safe Mode.\n", 3825 hw->active_pkg_name, 3826 hw->active_pkg_ver.major, 3827 hw->active_pkg_ver.minor, 3828 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 3829 *status = ICE_ERR_NOT_SUPPORTED; 3830 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 3831 hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { 3832 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 3833 hw->active_pkg_name, 3834 hw->active_pkg_ver.major, 3835 hw->active_pkg_ver.minor, 3836 hw->active_pkg_ver.update, 3837 hw->active_pkg_ver.draft, 3838 hw->pkg_name, 3839 hw->pkg_ver.major, 3840 hw->pkg_ver.minor, 3841 hw->pkg_ver.update, 3842 hw->pkg_ver.draft); 3843 } else { 3844 dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); 3845 *status = ICE_ERR_NOT_SUPPORTED; 3846 } 3847 break; 3848 case ICE_ERR_FW_DDP_MISMATCH: 3849 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering Safe Mode.\n"); 3850 break; 3851 case ICE_ERR_BUF_TOO_SHORT: 3852 case ICE_ERR_CFG: 3853 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); 3854 break; 3855 case ICE_ERR_NOT_SUPPORTED: 3856 /* Package File version not supported */ 3857 if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || 3858 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 3859 hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) 3860 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 3861 else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || 3862 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 3863 hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) 3864 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 3865 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 3866 break; 3867 case ICE_ERR_AQ_ERROR: 3868 switch (hw->pkg_dwnld_status) { 3869 case ICE_AQ_RC_ENOSEC: 3870 case ICE_AQ_RC_EBADSIG: 3871 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); 3872 return; 3873 case ICE_AQ_RC_ESVN: 3874 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 3875 return; 3876 case ICE_AQ_RC_EBADMAN: 3877 case ICE_AQ_RC_EBADBUF: 3878 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 3879 /* poll for reset to complete */ 3880 if (ice_check_reset(hw)) 3881 dev_err(dev, "Error resetting device. Please reload the driver\n"); 3882 return; 3883 default: 3884 break; 3885 } 3886 fallthrough; 3887 default: 3888 dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", 3889 *status); 3890 break; 3891 } 3892 } 3893 3894 /** 3895 * ice_load_pkg - load/reload the DDP Package file 3896 * @firmware: firmware structure when firmware requested or NULL for reload 3897 * @pf: pointer to the PF instance 3898 * 3899 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 3900 * initialize HW tables.
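* On probe the just-requested firmware image is passed in and a copy is cached in hw->pkg_copy; on a post-reset rebuild, firmware is NULL and the cached copy is re-initialized instead.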
3901 */ 3902 static void 3903 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 3904 { 3905 enum ice_status status = ICE_ERR_PARAM; 3906 struct device *dev = ice_pf_to_dev(pf); 3907 struct ice_hw *hw = &pf->hw; 3908 3909 /* Load DDP Package */ 3910 if (firmware && !hw->pkg_copy) { 3911 status = ice_copy_and_init_pkg(hw, firmware->data, 3912 firmware->size); 3913 ice_log_pkg_init(hw, &status); 3914 } else if (!firmware && hw->pkg_copy) { 3915 /* Reload package during rebuild after CORER/GLOBR reset */ 3916 status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 3917 ice_log_pkg_init(hw, &status); 3918 } else { 3919 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); 3920 } 3921 3922 if (status) { 3923 /* Safe Mode */ 3924 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 3925 return; 3926 } 3927 3928 /* A successful package download is the precondition for advanced 3929 * features, hence the ICE_FLAG_ADV_FEATURES flag is set 3930 */ 3931 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 3932 } 3933 3934 /** 3935 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 3936 * @pf: pointer to the PF structure 3937 * 3938 * There is no error returned here because the driver should be able to handle 3939 * 128 Byte cache lines, so we only print a warning in case issues are seen, 3940 * specifically with Tx. 3941 */ 3942 static void ice_verify_cacheline_size(struct ice_pf *pf) 3943 { 3944 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 3945 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 3946 ICE_CACHE_LINE_BYTES); 3947 } 3948 3949 /** 3950 * ice_send_version - update firmware with driver version 3951 * @pf: PF struct 3952 * 3953 * Returns ICE_SUCCESS on success, else error code 3954 */ 3955 static enum ice_status ice_send_version(struct ice_pf *pf) 3956 { 3957 struct ice_driver_ver dv; 3958 3959 dv.major_ver = 0xff; 3960 dv.minor_ver = 0xff; 3961 dv.build_ver = 0xff; 3962 dv.subbuild_ver = 0; 3963 strscpy((char *)dv.driver_string, UTS_RELEASE, 3964 sizeof(dv.driver_string)); 3965 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 3966 } 3967 3968 /** 3969 * ice_init_fdir - Initialize flow director VSI and configuration 3970 * @pf: pointer to the PF instance 3971 * 3972 * Returns 0 on success, negative on error 3973 */ 3974 static int ice_init_fdir(struct ice_pf *pf) 3975 { 3976 struct device *dev = ice_pf_to_dev(pf); 3977 struct ice_vsi *ctrl_vsi; 3978 int err; 3979 3980 /* Side Band Flow Director needs to have a control VSI. 3981 * Allocate it and store it in the PF.
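* The control VSI carries the Flow Director programming descriptors rather than regular network traffic.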
*/ 3982 3983 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); 3984 if (!ctrl_vsi) { 3985 dev_dbg(dev, "could not create control VSI\n"); 3986 return -ENOMEM; 3987 } 3988 3989 err = ice_vsi_open_ctrl(ctrl_vsi); 3990 if (err) { 3991 dev_dbg(dev, "could not open control VSI\n"); 3992 goto err_vsi_open; 3993 } 3994 3995 mutex_init(&pf->hw.fdir_fltr_lock); 3996 3997 err = ice_fdir_create_dflt_rules(pf); 3998 if (err) 3999 goto err_fdir_rule; 4000 4001 return 0; 4002 4003 err_fdir_rule: 4004 ice_fdir_release_flows(&pf->hw); 4005 ice_vsi_close(ctrl_vsi); 4006 err_vsi_open: 4007 ice_vsi_release(ctrl_vsi); 4008 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 4009 pf->vsi[pf->ctrl_vsi_idx] = NULL; 4010 pf->ctrl_vsi_idx = ICE_NO_VSI; 4011 } 4012 return err; 4013 } 4014 4015 /** 4016 * ice_get_opt_fw_name - return optional firmware file name or NULL 4017 * @pf: pointer to the PF instance 4018 */ 4019 static char *ice_get_opt_fw_name(struct ice_pf *pf) 4020 { 4021 /* Optional firmware name same as default with additional dash 4022 * followed by an EUI-64 identifier (PCIe Device Serial Number) 4023 */ 4024 struct pci_dev *pdev = pf->pdev; 4025 char *opt_fw_filename; 4026 u64 dsn; 4027 4028 /* Determine the name of the optional file using the DSN (two 4029 * dwords following the start of the DSN Capability). 4030 */ 4031 dsn = pci_get_dsn(pdev); 4032 if (!dsn) 4033 return NULL; 4034 4035 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); 4036 if (!opt_fw_filename) 4037 return NULL; 4038 4039 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", 4040 ICE_DDP_PKG_PATH, dsn); 4041 4042 return opt_fw_filename; 4043 } 4044 4045 /** 4046 * ice_request_fw - Device initialization routine 4047 * @pf: pointer to the PF instance 4048 */ 4049 static void ice_request_fw(struct ice_pf *pf) 4050 { 4051 char *opt_fw_filename = ice_get_opt_fw_name(pf); 4052 const struct firmware *firmware = NULL; 4053 struct device *dev = ice_pf_to_dev(pf); 4054 int err = 0; 4055 4056 /* An optional device-specific DDP (if present) overrides the default DDP 4057 * package file. The kernel logs a debug message if the file doesn't exist, 4058 * and warning messages for other errors. 4059 */ 4060 if (opt_fw_filename) { 4061 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); 4062 if (err) { 4063 kfree(opt_fw_filename); 4064 goto dflt_pkg_load; 4065 } 4066 4067 /* request for firmware was successful. Download to device */ 4068 ice_load_pkg(firmware, pf); 4069 kfree(opt_fw_filename); 4070 release_firmware(firmware); 4071 return; 4072 } 4073 4074 dflt_pkg_load: 4075 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); 4076 if (err) { 4077 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); 4078 return; 4079 } 4080 4081 /* request for firmware was successful.
Download to device */ 4082 ice_load_pkg(firmware, pf); 4083 release_firmware(firmware); 4084 } 4085 4086 /** 4087 * ice_print_wake_reason - show the wake up cause in the log 4088 * @pf: pointer to the PF struct 4089 */ 4090 static void ice_print_wake_reason(struct ice_pf *pf) 4091 { 4092 u32 wus = pf->wakeup_reason; 4093 const char *wake_str; 4094 4095 /* if no wake event, nothing to print */ 4096 if (!wus) 4097 return; 4098 4099 if (wus & PFPM_WUS_LNKC_M) 4100 wake_str = "Link\n"; 4101 else if (wus & PFPM_WUS_MAG_M) 4102 wake_str = "Magic Packet\n"; 4103 else if (wus & PFPM_WUS_MNG_M) 4104 wake_str = "Management\n"; 4105 else if (wus & PFPM_WUS_FW_RST_WK_M) 4106 wake_str = "Firmware Reset\n"; 4107 else 4108 wake_str = "Unknown\n"; 4109 4110 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 4111 } 4112 4113 /** 4114 * ice_register_netdev - register netdev and devlink port 4115 * @pf: pointer to the PF struct 4116 */ 4117 static int ice_register_netdev(struct ice_pf *pf) 4118 { 4119 struct ice_vsi *vsi; 4120 int err = 0; 4121 4122 vsi = ice_get_main_vsi(pf); 4123 if (!vsi || !vsi->netdev) 4124 return -EIO; 4125 4126 err = register_netdev(vsi->netdev); 4127 if (err) 4128 goto err_register_netdev; 4129 4130 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4131 netif_carrier_off(vsi->netdev); 4132 netif_tx_stop_all_queues(vsi->netdev); 4133 err = ice_devlink_create_port(vsi); 4134 if (err) 4135 goto err_devlink_create; 4136 4137 devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); 4138 4139 return 0; 4140 err_devlink_create: 4141 unregister_netdev(vsi->netdev); 4142 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4143 err_register_netdev: 4144 free_netdev(vsi->netdev); 4145 vsi->netdev = NULL; 4146 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4147 return err; 4148 } 4149 4150 /** 4151 * ice_probe - Device initialization routine 4152 * @pdev: PCI device information struct 4153 * @ent: entry in ice_pci_tbl 4154 * 4155 * Returns 0 on success, negative on failure 4156 */ 4157 static int 4158 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 4159 { 4160 struct device *dev = &pdev->dev; 4161 struct ice_pf *pf; 4162 struct ice_hw *hw; 4163 int i, err; 4164 4165 /* this driver uses devres, see 4166 * Documentation/driver-api/driver-model/devres.rst 4167 */ 4168 err = pcim_enable_device(pdev); 4169 if (err) 4170 return err; 4171 4172 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 4173 if (err) { 4174 dev_err(dev, "BAR0 I/O map error %d\n", err); 4175 return err; 4176 } 4177 4178 pf = ice_allocate_pf(dev); 4179 if (!pf) 4180 return -ENOMEM; 4181 4182 /* set up for high or low DMA */ 4183 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4184 if (err) 4185 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4186 if (err) { 4187 dev_err(dev, "DMA configuration failed: 0x%x\n", err); 4188 return err; 4189 } 4190 4191 pci_enable_pcie_error_reporting(pdev); 4192 pci_set_master(pdev); 4193 4194 pf->pdev = pdev; 4195 pci_set_drvdata(pdev, pf); 4196 set_bit(ICE_DOWN, pf->state); 4197 /* Disable service task until DOWN bit is cleared */ 4198 set_bit(ICE_SERVICE_DIS, pf->state); 4199 4200 hw = &pf->hw; 4201 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 4202 pci_save_state(pdev); 4203 4204 hw->back = pf; 4205 hw->vendor_id = pdev->vendor; 4206 hw->device_id = pdev->device; 4207 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 4208 hw->subsystem_vendor_id = pdev->subsystem_vendor; 4209 hw->subsystem_device_id = 
pdev->subsystem_device; 4210 hw->bus.device = PCI_SLOT(pdev->devfn); 4211 hw->bus.func = PCI_FUNC(pdev->devfn); 4212 ice_set_ctrlq_len(hw); 4213 4214 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 4215 4216 err = ice_devlink_register(pf); 4217 if (err) { 4218 dev_err(dev, "ice_devlink_register failed: %d\n", err); 4219 goto err_exit_unroll; 4220 } 4221 4222 #ifndef CONFIG_DYNAMIC_DEBUG 4223 if (debug < -1) 4224 hw->debug_mask = debug; 4225 #endif 4226 4227 err = ice_init_hw(hw); 4228 if (err) { 4229 dev_err(dev, "ice_init_hw failed: %d\n", err); 4230 err = -EIO; 4231 goto err_exit_unroll; 4232 } 4233 4234 ice_request_fw(pf); 4235 4236 /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be 4237 * set in pf->flags, which will cause ice_is_safe_mode to return 4238 * true 4239 */ 4240 if (ice_is_safe_mode(pf)) { 4241 dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); 4242 /* we already got function/device capabilities but these don't 4243 * reflect what the driver needs to do in safe mode. Instead of 4244 * adding conditional logic everywhere to ignore these 4245 * device/function capabilities, override them. 4246 */ 4247 ice_set_safe_mode_caps(hw); 4248 } 4249 4250 err = ice_init_pf(pf); 4251 if (err) { 4252 dev_err(dev, "ice_init_pf failed: %d\n", err); 4253 goto err_init_pf_unroll; 4254 } 4255 4256 ice_devlink_init_regions(pf); 4257 4258 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; 4259 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; 4260 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; 4261 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; 4262 i = 0; 4263 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 4264 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4265 pf->hw.tnl.valid_count[TNL_VXLAN]; 4266 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4267 UDP_TUNNEL_TYPE_VXLAN; 4268 i++; 4269 } 4270 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { 4271 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4272 pf->hw.tnl.valid_count[TNL_GENEVE]; 4273 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4274 UDP_TUNNEL_TYPE_GENEVE; 4275 i++; 4276 } 4277 4278 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 4279 if (!pf->num_alloc_vsi) { 4280 err = -EIO; 4281 goto err_init_pf_unroll; 4282 } 4283 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 4284 dev_warn(&pf->pdev->dev, 4285 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 4286 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 4287 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 4288 } 4289 4290 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 4291 GFP_KERNEL); 4292 if (!pf->vsi) { 4293 err = -ENOMEM; 4294 goto err_init_pf_unroll; 4295 } 4296 4297 err = ice_init_interrupt_scheme(pf); 4298 if (err) { 4299 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 4300 err = -EIO; 4301 goto err_init_vsi_unroll; 4302 } 4303 4304 /* In case of MSIX we are going to set up the misc vector right here 4305 * to handle admin queue events etc. In case of legacy and MSI 4306 * the misc functionality and queue processing is combined in 4307 * the same vector and that gets set up at open.
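* Besides admin queue events, the misc vector also fields other global one-shot causes, such as VF mailbox messages.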
4308 */ 4309 err = ice_req_irq_msix_misc(pf); 4310 if (err) { 4311 dev_err(dev, "setup of misc vector failed: %d\n", err); 4312 goto err_init_interrupt_unroll; 4313 } 4314 4315 /* create switch struct for the switch element created by FW on boot */ 4316 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); 4317 if (!pf->first_sw) { 4318 err = -ENOMEM; 4319 goto err_msix_misc_unroll; 4320 } 4321 4322 if (hw->evb_veb) 4323 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4324 else 4325 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4326 4327 pf->first_sw->pf = pf; 4328 4329 /* record the sw_id available for later use */ 4330 pf->first_sw->sw_id = hw->port_info->sw_id; 4331 4332 err = ice_setup_pf_sw(pf); 4333 if (err) { 4334 dev_err(dev, "probe failed due to setup PF switch: %d\n", err); 4335 goto err_alloc_sw_unroll; 4336 } 4337 4338 clear_bit(ICE_SERVICE_DIS, pf->state); 4339 4340 /* tell the firmware we are up */ 4341 err = ice_send_version(pf); 4342 if (err) { 4343 dev_err(dev, "probe failed sending driver version %s. error: %d\n", 4344 UTS_RELEASE, err); 4345 goto err_send_version_unroll; 4346 } 4347 4348 /* since everything is good, start the service timer */ 4349 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4350 4351 err = ice_init_link_events(pf->hw.port_info); 4352 if (err) { 4353 dev_err(dev, "ice_init_link_events failed: %d\n", err); 4354 goto err_send_version_unroll; 4355 } 4356 4357 /* not a fatal error if this fails */ 4358 err = ice_init_nvm_phy_type(pf->hw.port_info); 4359 if (err) 4360 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 4361 4362 /* not a fatal error if this fails */ 4363 err = ice_update_link_info(pf->hw.port_info); 4364 if (err) 4365 dev_err(dev, "ice_update_link_info failed: %d\n", err); 4366 4367 ice_init_link_dflt_override(pf->hw.port_info); 4368 4369 ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err); 4370 4371 /* if media available, initialize PHY settings */ 4372 if (pf->hw.port_info->phy.link_info.link_info & 4373 ICE_AQ_MEDIA_AVAILABLE) { 4374 /* not a fatal error if this fails */ 4375 err = ice_init_phy_user_cfg(pf->hw.port_info); 4376 if (err) 4377 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 4378 4379 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 4380 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4381 4382 if (vsi) 4383 ice_configure_phy(vsi); 4384 } 4385 } else { 4386 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 4387 } 4388 4389 ice_verify_cacheline_size(pf); 4390 4391 /* Save wakeup reason register for later use */ 4392 pf->wakeup_reason = rd32(hw, PFPM_WUS); 4393 4394 /* check for a power management event */ 4395 ice_print_wake_reason(pf); 4396 4397 /* clear wake status, all bits */ 4398 wr32(hw, PFPM_WUS, U32_MAX); 4399 4400 /* Disable WoL at init, wait for user to enable */ 4401 device_set_wakeup_enable(dev, false); 4402 4403 if (ice_is_safe_mode(pf)) { 4404 ice_set_safe_mode_vlan_cfg(pf); 4405 goto probe_done; 4406 } 4407 4408 /* initialize DDP driven features */ 4409 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4410 ice_ptp_init(pf); 4411 4412 /* Note: Flow director init failure is non-fatal to load */ 4413 if (ice_init_fdir(pf)) 4414 dev_err(dev, "could not initialize flow director\n"); 4415 4416 /* Note: DCB init failure is non-fatal to load */ 4417 if (ice_init_pf_dcb(pf, false)) { 4418 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4419 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 4420 } else { 4421 ice_cfg_lldp_mib_change(&pf->hw, true); 4422 } 4423 4424 
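/* Note: link aggregation init failure is non-fatal to load */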
if (ice_init_lag(pf)) 4425 dev_warn(dev, "Failed to init link aggregation support\n"); 4426 4427 /* print PCI link speed and width */ 4428 pcie_print_link_status(pf->pdev); 4429 4430 probe_done: 4431 err = ice_register_netdev(pf); 4432 if (err) 4433 goto err_netdev_reg; 4434 4435 /* ready to go, so clear down state bit */ 4436 clear_bit(ICE_DOWN, pf->state); 4437 if (ice_is_aux_ena(pf)) { 4438 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); 4439 if (pf->aux_idx < 0) { 4440 dev_err(dev, "Failed to allocate device ID for AUX driver\n"); 4441 err = -ENOMEM; 4442 goto err_netdev_reg; 4443 } 4444 4445 err = ice_init_rdma(pf); 4446 if (err) { 4447 dev_err(dev, "Failed to initialize RDMA: %d\n", err); 4448 err = -EIO; 4449 goto err_init_aux_unroll; 4450 } 4451 } else { 4452 dev_warn(dev, "RDMA is not supported on this device\n"); 4453 } 4454 4455 return 0; 4456 4457 err_init_aux_unroll: 4458 pf->adev = NULL; 4459 ida_free(&ice_aux_ida, pf->aux_idx); 4460 err_netdev_reg: 4461 err_send_version_unroll: 4462 ice_vsi_release_all(pf); 4463 err_alloc_sw_unroll: 4464 set_bit(ICE_SERVICE_DIS, pf->state); 4465 set_bit(ICE_DOWN, pf->state); 4466 devm_kfree(dev, pf->first_sw); 4467 err_msix_misc_unroll: 4468 ice_free_irq_msix_misc(pf); 4469 err_init_interrupt_unroll: 4470 ice_clear_interrupt_scheme(pf); 4471 err_init_vsi_unroll: 4472 devm_kfree(dev, pf->vsi); 4473 err_init_pf_unroll: 4474 ice_deinit_pf(pf); 4475 ice_devlink_destroy_regions(pf); 4476 ice_deinit_hw(hw); 4477 err_exit_unroll: 4478 ice_devlink_unregister(pf); 4479 pci_disable_pcie_error_reporting(pdev); 4480 pci_disable_device(pdev); 4481 return err; 4482 } 4483 4484 /** 4485 * ice_set_wake - enable or disable Wake on LAN 4486 * @pf: pointer to the PF struct 4487 * 4488 * Simple helper for WoL control 4489 */ 4490 static void ice_set_wake(struct ice_pf *pf) 4491 { 4492 struct ice_hw *hw = &pf->hw; 4493 bool wol = pf->wol_ena; 4494 4495 /* clear wake state, otherwise new wake events won't fire */ 4496 wr32(hw, PFPM_WUS, U32_MAX); 4497 4498 /* enable / disable APM wake up, no RMW needed */ 4499 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 4500 4501 /* set magic packet filter enabled */ 4502 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 4503 } 4504 4505 /** 4506 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 4507 * @pf: pointer to the PF struct 4508 * 4509 * Issue firmware command to enable multicast magic wake, making 4510 * sure that any locally administered address (LAA) is used for 4511 * wake, and that PF reset doesn't undo the LAA. 
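* The current netdev address is used (falling back to the permanent HW address when no netdev is attached) so that a user-configured LAA remains the wake address across a PF reset.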
4512 */ 4513 static void ice_setup_mc_magic_wake(struct ice_pf *pf) 4514 { 4515 struct device *dev = ice_pf_to_dev(pf); 4516 struct ice_hw *hw = &pf->hw; 4517 enum ice_status status; 4518 u8 mac_addr[ETH_ALEN]; 4519 struct ice_vsi *vsi; 4520 u8 flags; 4521 4522 if (!pf->wol_ena) 4523 return; 4524 4525 vsi = ice_get_main_vsi(pf); 4526 if (!vsi) 4527 return; 4528 4529 /* Get current MAC address in case it's an LAA */ 4530 if (vsi->netdev) 4531 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 4532 else 4533 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 4534 4535 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 4536 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 4537 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 4538 4539 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 4540 if (status) 4541 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", 4542 ice_stat_str(status), 4543 ice_aq_str(hw->adminq.sq_last_status)); 4544 } 4545 4546 /** 4547 * ice_remove - Device removal routine 4548 * @pdev: PCI device information struct 4549 */ 4550 static void ice_remove(struct pci_dev *pdev) 4551 { 4552 struct ice_pf *pf = pci_get_drvdata(pdev); 4553 int i; 4554 4555 if (!pf) 4556 return; 4557 4558 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 4559 if (!ice_is_reset_in_progress(pf->state)) 4560 break; 4561 msleep(100); 4562 } 4563 4564 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { 4565 set_bit(ICE_VF_RESETS_DISABLED, pf->state); 4566 ice_free_vfs(pf); 4567 } 4568 4569 ice_service_task_stop(pf); 4570 4571 ice_aq_cancel_waiting_tasks(pf); 4572 ice_unplug_aux_dev(pf); 4573 ida_free(&ice_aux_ida, pf->aux_idx); 4574 set_bit(ICE_DOWN, pf->state); 4575 4576 mutex_destroy(&pf->hw.fdir_fltr_lock); 4577 ice_deinit_lag(pf); 4578 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4579 ice_ptp_release(pf); 4580 if (!ice_is_safe_mode(pf)) 4581 ice_remove_arfs(pf); 4582 ice_setup_mc_magic_wake(pf); 4583 ice_vsi_release_all(pf); 4584 ice_set_wake(pf); 4585 ice_free_irq_msix_misc(pf); 4586 ice_for_each_vsi(pf, i) { 4587 if (!pf->vsi[i]) 4588 continue; 4589 ice_vsi_free_q_vectors(pf->vsi[i]); 4590 } 4591 ice_deinit_pf(pf); 4592 ice_devlink_destroy_regions(pf); 4593 ice_deinit_hw(&pf->hw); 4594 ice_devlink_unregister(pf); 4595 4596 /* Issue a PFR as part of the prescribed driver unload flow. Do not 4597 * do it via ice_schedule_reset() since there is no need to rebuild 4598 * and the service task is already stopped.
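* The PFR returns the function to a clean state in firmware so a later driver load starts fresh.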
4599 */ 4600 ice_reset(&pf->hw, ICE_RESET_PFR); 4601 pci_wait_for_pending_transaction(pdev); 4602 ice_clear_interrupt_scheme(pf); 4603 pci_disable_pcie_error_reporting(pdev); 4604 pci_disable_device(pdev); 4605 } 4606 4607 /** 4608 * ice_shutdown - PCI callback for shutting down device 4609 * @pdev: PCI device information struct 4610 */ 4611 static void ice_shutdown(struct pci_dev *pdev) 4612 { 4613 struct ice_pf *pf = pci_get_drvdata(pdev); 4614 4615 ice_remove(pdev); 4616 4617 if (system_state == SYSTEM_POWER_OFF) { 4618 pci_wake_from_d3(pdev, pf->wol_ena); 4619 pci_set_power_state(pdev, PCI_D3hot); 4620 } 4621 } 4622 4623 #ifdef CONFIG_PM 4624 /** 4625 * ice_prepare_for_shutdown - prep for PCI shutdown 4626 * @pf: board private structure 4627 * 4628 * Inform or close all dependent features in prep for PCI device shutdown 4629 */ 4630 static void ice_prepare_for_shutdown(struct ice_pf *pf) 4631 { 4632 struct ice_hw *hw = &pf->hw; 4633 u32 v; 4634 4635 /* Notify VFs of impending reset */ 4636 if (ice_check_sq_alive(hw, &hw->mailboxq)) 4637 ice_vc_notify_reset(pf); 4638 4639 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); 4640 4641 /* disable the VSIs and their queues that are not already DOWN */ 4642 ice_pf_dis_all_vsi(pf, false); 4643 4644 ice_for_each_vsi(pf, v) 4645 if (pf->vsi[v]) 4646 pf->vsi[v]->vsi_num = 0; 4647 4648 ice_shutdown_all_ctrlq(hw); 4649 } 4650 4651 /** 4652 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme 4653 * @pf: board private structure to reinitialize 4654 * 4655 * This routine reinitializes the interrupt scheme that was cleared during 4656 * the power management suspend callback. 4657 * 4658 * This should be called during resume routine to re-allocate the q_vectors 4659 * and reacquire interrupts. 4660 */ 4661 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) 4662 { 4663 struct device *dev = ice_pf_to_dev(pf); 4664 int ret, v; 4665 4666 /* Since we clear MSIX flag during suspend, we need to 4667 * set it back during resume... 4668 */ 4669 4670 ret = ice_init_interrupt_scheme(pf); 4671 if (ret) { 4672 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); 4673 return ret; 4674 } 4675 4676 /* Remap vectors and rings, after successful re-init interrupts */ 4677 ice_for_each_vsi(pf, v) { 4678 if (!pf->vsi[v]) 4679 continue; 4680 4681 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); 4682 if (ret) 4683 goto err_reinit; 4684 ice_vsi_map_rings_to_vectors(pf->vsi[v]); 4685 } 4686 4687 ret = ice_req_irq_msix_misc(pf); 4688 if (ret) { 4689 dev_err(dev, "Setting up misc vector failed after device suspend %d\n", 4690 ret); 4691 goto err_reinit; 4692 } 4693 4694 return 0; 4695 4696 err_reinit: 4697 while (v--) 4698 if (pf->vsi[v]) 4699 ice_vsi_free_q_vectors(pf->vsi[v]); 4700 4701 return ret; 4702 } 4703 4704 /** 4705 * ice_suspend - PM callback for suspending the device 4706 * @dev: generic device information structure 4707 * 4708 * Power Management callback to quiesce the device and prepare 4709 * for D3 transition. 4710 */ 4711 static int __maybe_unused ice_suspend(struct device *dev) 4712 { 4713 struct pci_dev *pdev = to_pci_dev(dev); 4714 struct ice_pf *pf; 4715 int disabled, v; 4716 4717 pf = pci_get_drvdata(pdev); 4718 4719 if (!ice_pf_state_is_nominal(pf)) { 4720 dev_err(dev, "Device is not ready, no need to suspend it\n"); 4721 return -EBUSY; 4722 } 4723 4724 /* Stop watchdog tasks until resume completion.
4725 * Even though it is most likely that the service task is 4726 * disabled if the device is suspended or down, the service task's 4727 * state is controlled by a different state bit, and we should 4728 * store and honor whatever state that bit is in at this point. 4729 */ 4730 disabled = ice_service_task_stop(pf); 4731 4732 ice_unplug_aux_dev(pf); 4733 4734 /* If already suspended, there is nothing to do */ 4735 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { 4736 if (!disabled) 4737 ice_service_task_restart(pf); 4738 return 0; 4739 } 4740 4741 if (test_bit(ICE_DOWN, pf->state) || 4742 ice_is_reset_in_progress(pf->state)) { 4743 dev_err(dev, "can't suspend device in reset or already down\n"); 4744 if (!disabled) 4745 ice_service_task_restart(pf); 4746 return 0; 4747 } 4748 4749 ice_setup_mc_magic_wake(pf); 4750 4751 ice_prepare_for_shutdown(pf); 4752 4753 ice_set_wake(pf); 4754 4755 /* Free vectors, clear the interrupt scheme and release IRQs 4756 * for proper hibernation, especially with large number of CPUs. 4757 * Otherwise hibernation might fail when mapping all the vectors back 4758 * to CPU0. 4759 */ 4760 ice_free_irq_msix_misc(pf); 4761 ice_for_each_vsi(pf, v) { 4762 if (!pf->vsi[v]) 4763 continue; 4764 ice_vsi_free_q_vectors(pf->vsi[v]); 4765 } 4766 ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); 4767 ice_clear_interrupt_scheme(pf); 4768 4769 pci_save_state(pdev); 4770 pci_wake_from_d3(pdev, pf->wol_ena); 4771 pci_set_power_state(pdev, PCI_D3hot); 4772 return 0; 4773 } 4774 4775 /** 4776 * ice_resume - PM callback for waking up from D3 4777 * @dev: generic device information structure 4778 */ 4779 static int __maybe_unused ice_resume(struct device *dev) 4780 { 4781 struct pci_dev *pdev = to_pci_dev(dev); 4782 enum ice_reset_req reset_type; 4783 struct ice_pf *pf; 4784 struct ice_hw *hw; 4785 int ret; 4786 4787 pci_set_power_state(pdev, PCI_D0); 4788 pci_restore_state(pdev); 4789 pci_save_state(pdev); 4790 4791 if (!pci_device_is_present(pdev)) 4792 return -ENODEV; 4793 4794 ret = pci_enable_device_mem(pdev); 4795 if (ret) { 4796 dev_err(dev, "Cannot enable device after suspend\n"); 4797 return ret; 4798 } 4799 4800 pf = pci_get_drvdata(pdev); 4801 hw = &pf->hw; 4802 4803 pf->wakeup_reason = rd32(hw, PFPM_WUS); 4804 ice_print_wake_reason(pf); 4805 4806 /* We cleared the interrupt scheme when we suspended, so we need to 4807 * restore it now to resume device functionality. 4808 */ 4809 ret = ice_reinit_interrupt_scheme(pf); 4810 if (ret) 4811 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); 4812 4813 clear_bit(ICE_DOWN, pf->state); 4814 /* Now perform PF reset and rebuild */ 4815 reset_type = ICE_RESET_PFR; 4816 /* re-enable service task for reset, but allow reset to schedule it */ 4817 clear_bit(ICE_SERVICE_DIS, pf->state); 4818 4819 if (ice_schedule_reset(pf, reset_type)) 4820 dev_err(dev, "Reset during resume failed.\n"); 4821 4822 clear_bit(ICE_SUSPENDED, pf->state); 4823 ice_service_task_restart(pf); 4824 4825 /* Restart the service timer */ 4826 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4827 4828 return 0; 4829 } 4830 #endif /* CONFIG_PM */ 4831 4832 /** 4833 * ice_pci_err_detected - warning that PCI error has been detected 4834 * @pdev: PCI device information struct 4835 * @err: the type of PCI error 4836 * 4837 * Called to warn that something happened on the PCI bus and the error handling 4838 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
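* Returns PCI_ERS_RESULT_NEED_RESET so the PCI core follows up with a slot reset, or PCI_ERS_RESULT_DISCONNECT when no driver private data is bound to the device.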
4839 */ 4840 static pci_ers_result_t 4841 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) 4842 { 4843 struct ice_pf *pf = pci_get_drvdata(pdev); 4844 4845 if (!pf) { 4846 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 4847 __func__, err); 4848 return PCI_ERS_RESULT_DISCONNECT; 4849 } 4850 4851 if (!test_bit(ICE_SUSPENDED, pf->state)) { 4852 ice_service_task_stop(pf); 4853 4854 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 4855 set_bit(ICE_PFR_REQ, pf->state); 4856 ice_prepare_for_reset(pf); 4857 } 4858 } 4859 4860 return PCI_ERS_RESULT_NEED_RESET; 4861 } 4862 4863 /** 4864 * ice_pci_err_slot_reset - a PCI slot reset has just happened 4865 * @pdev: PCI device information struct 4866 * 4867 * Called to determine if the driver can recover from the PCI slot reset by 4868 * using a register read to determine if the device is recoverable. 4869 */ 4870 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 4871 { 4872 struct ice_pf *pf = pci_get_drvdata(pdev); 4873 pci_ers_result_t result; 4874 int err; 4875 u32 reg; 4876 4877 err = pci_enable_device_mem(pdev); 4878 if (err) { 4879 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 4880 err); 4881 result = PCI_ERS_RESULT_DISCONNECT; 4882 } else { 4883 pci_set_master(pdev); 4884 pci_restore_state(pdev); 4885 pci_save_state(pdev); 4886 pci_wake_from_d3(pdev, false); 4887 4888 /* Check for life */ 4889 reg = rd32(&pf->hw, GLGEN_RTRIG); 4890 if (!reg) 4891 result = PCI_ERS_RESULT_RECOVERED; 4892 else 4893 result = PCI_ERS_RESULT_DISCONNECT; 4894 } 4895 4896 err = pci_aer_clear_nonfatal_status(pdev); 4897 if (err) 4898 dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n", 4899 err); 4900 /* non-fatal, continue */ 4901 4902 return result; 4903 } 4904 4905 /** 4906 * ice_pci_err_resume - restart operations after PCI error recovery 4907 * @pdev: PCI device information struct 4908 * 4909 * Called to allow the driver to bring things back up after PCI error and/or 4910 * reset recovery have finished 4911 */ 4912 static void ice_pci_err_resume(struct pci_dev *pdev) 4913 { 4914 struct ice_pf *pf = pci_get_drvdata(pdev); 4915 4916 if (!pf) { 4917 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 4918 __func__); 4919 return; 4920 } 4921 4922 if (test_bit(ICE_SUSPENDED, pf->state)) { 4923 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 4924 __func__); 4925 return; 4926 } 4927 4928 ice_restore_all_vfs_msi_state(pdev); 4929 4930 ice_do_reset(pf, ICE_RESET_PFR); 4931 ice_service_task_restart(pf); 4932 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4933 } 4934 4935 /** 4936 * ice_pci_err_reset_prepare - prepare device driver for PCI reset 4937 * @pdev: PCI device information struct 4938 */ 4939 static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 4940 { 4941 struct ice_pf *pf = pci_get_drvdata(pdev); 4942 4943 if (!test_bit(ICE_SUSPENDED, pf->state)) { 4944 ice_service_task_stop(pf); 4945 4946 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 4947 set_bit(ICE_PFR_REQ, pf->state); 4948 ice_prepare_for_reset(pf); 4949 } 4950 } 4951 } 4952 4953 /** 4954 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 4955 * @pdev: PCI device information struct 4956 */ 4957 static void ice_pci_err_reset_done(struct pci_dev *pdev) 4958 { 4959 ice_pci_err_resume(pdev); 4960 } 4961 4962 /* ice_pci_tbl - PCI Device ID Table 4963 * 4964 * Wildcard entries (PCI_ANY_ID) should come last 4965 * Last entry must be all 
0s 4966 * 4967 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 4968 * Class, Class Mask, private data (not used) } 4969 */ 4970 static const struct pci_device_id ice_pci_tbl[] = { 4971 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 4972 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 4973 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 4974 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, 4975 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, 4976 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, 4977 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, 4978 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, 4979 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, 4980 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, 4981 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, 4982 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, 4983 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, 4984 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, 4985 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, 4986 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, 4987 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, 4988 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, 4989 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, 4990 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, 4991 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, 4992 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, 4993 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, 4994 /* required last entry */ 4995 { 0, } 4996 }; 4997 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 4998 4999 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); 5000 5001 static const struct pci_error_handlers ice_pci_err_handler = { 5002 .error_detected = ice_pci_err_detected, 5003 .slot_reset = ice_pci_err_slot_reset, 5004 .reset_prepare = ice_pci_err_reset_prepare, 5005 .reset_done = ice_pci_err_reset_done, 5006 .resume = ice_pci_err_resume 5007 }; 5008 5009 static struct pci_driver ice_driver = { 5010 .name = KBUILD_MODNAME, 5011 .id_table = ice_pci_tbl, 5012 .probe = ice_probe, 5013 .remove = ice_remove, 5014 #ifdef CONFIG_PM 5015 .driver.pm = &ice_pm_ops, 5016 #endif /* CONFIG_PM */ 5017 .shutdown = ice_shutdown, 5018 .sriov_configure = ice_sriov_configure, 5019 .err_handler = &ice_pci_err_handler 5020 }; 5021 5022 /** 5023 * ice_module_init - Driver registration routine 5024 * 5025 * ice_module_init is the first routine called when the driver is 5026 * loaded. All it does is register with the PCI subsystem. 5027 */ 5028 static int __init ice_module_init(void) 5029 { 5030 int status; 5031 5032 pr_info("%s\n", ice_driver_string); 5033 pr_info("%s\n", ice_copyright); 5034 5035 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 5036 if (!ice_wq) { 5037 pr_err("Failed to create workqueue\n"); 5038 return -ENOMEM; 5039 } 5040 5041 status = pci_register_driver(&ice_driver); 5042 if (status) { 5043 pr_err("failed to register PCI driver, err %d\n", status); 5044 destroy_workqueue(ice_wq); 5045 } 5046 5047 return status; 5048 } 5049 module_init(ice_module_init); 5050 5051 /** 5052 * ice_module_exit - Driver exit cleanup routine 5053 * 5054 * ice_module_exit is called just before the driver is removed 5055 * from memory. 
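* It unregisters the PCI driver and destroys the driver workqueue.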
5056 */ 5057 static void __exit ice_module_exit(void) 5058 { 5059 pci_unregister_driver(&ice_driver); 5060 destroy_workqueue(ice_wq); 5061 pr_info("module unloaded\n"); 5062 } 5063 module_exit(ice_module_exit); 5064 5065 /** 5066 * ice_set_mac_address - NDO callback to set MAC address 5067 * @netdev: network interface device structure 5068 * @pi: pointer to an address structure 5069 * 5070 * Returns 0 on success, negative on failure 5071 */ 5072 static int ice_set_mac_address(struct net_device *netdev, void *pi) 5073 { 5074 struct ice_netdev_priv *np = netdev_priv(netdev); 5075 struct ice_vsi *vsi = np->vsi; 5076 struct ice_pf *pf = vsi->back; 5077 struct ice_hw *hw = &pf->hw; 5078 struct sockaddr *addr = pi; 5079 enum ice_status status; 5080 u8 flags = 0; 5081 int err = 0; 5082 u8 *mac; 5083 5084 mac = (u8 *)addr->sa_data; 5085 5086 if (!is_valid_ether_addr(mac)) 5087 return -EADDRNOTAVAIL; 5088 5089 if (ether_addr_equal(netdev->dev_addr, mac)) { 5090 netdev_warn(netdev, "already using mac %pM\n", mac); 5091 return 0; 5092 } 5093 5094 if (test_bit(ICE_DOWN, pf->state) || 5095 ice_is_reset_in_progress(pf->state)) { 5096 netdev_err(netdev, "can't set mac %pM. device not ready\n", 5097 mac); 5098 return -EBUSY; 5099 } 5100 5101 /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 5102 status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI); 5103 if (status && status != ICE_ERR_DOES_NOT_EXIST) { 5104 err = -EADDRNOTAVAIL; 5105 goto err_update_filters; 5106 } 5107 5108 /* Add filter for new MAC. If filter exists, return success */ 5109 status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 5110 if (status == ICE_ERR_ALREADY_EXISTS) { 5111 /* Although this MAC filter is already present in hardware it's 5112 * possible in some cases (e.g. bonding) that dev_addr was 5113 * modified outside of the driver and needs to be restored back 5114 * to this value. 5115 */ 5116 memcpy(netdev->dev_addr, mac, netdev->addr_len); 5117 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 5118 return 0; 5119 } 5120 5121 /* error if the new filter addition failed */ 5122 if (status) 5123 err = -EADDRNOTAVAIL; 5124 5125 err_update_filters: 5126 if (err) { 5127 netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 5128 mac); 5129 return err; 5130 } 5131 5132 /* change the netdev's MAC address */ 5133 memcpy(netdev->dev_addr, mac, netdev->addr_len); 5134 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 5135 netdev->dev_addr); 5136 5137 /* write new MAC address to the firmware */ 5138 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 5139 status = ice_aq_manage_mac_write(hw, mac, flags, NULL); 5140 if (status) { 5141 netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %s\n", 5142 mac, ice_stat_str(status)); 5143 } 5144 return 0; 5145 } 5146 5147 /** 5148 * ice_set_rx_mode - NDO callback to set the netdev filters 5149 * @netdev: network interface device structure 5150 */ 5151 static void ice_set_rx_mode(struct net_device *netdev) 5152 { 5153 struct ice_netdev_priv *np = netdev_priv(netdev); 5154 struct ice_vsi *vsi = np->vsi; 5155 5156 if (!vsi) 5157 return; 5158 5159 /* Set the flags to synchronize filters 5160 * ndo_set_rx_mode may be triggered even without a change in netdev 5161 * flags 5162 */ 5163 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 5164 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 5165 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 5166 5167 /* schedule our worker thread which will take care of 5168 * applying the new filter changes 5169 */ 5170 ice_service_task_schedule(vsi->back); 5171 } 5172 5173 /** 5174 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 5175 * @netdev: network interface device structure 5176 * @queue_index: Queue ID 5177 * @maxrate: maximum bandwidth in Mbps 5178 */ 5179 static int 5180 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 5181 { 5182 struct ice_netdev_priv *np = netdev_priv(netdev); 5183 struct ice_vsi *vsi = np->vsi; 5184 enum ice_status status; 5185 u16 q_handle; 5186 u8 tc; 5187 5188 /* Validate maxrate requested is within permitted range */ 5189 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 5190 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 5191 maxrate, queue_index); 5192 return -EINVAL; 5193 } 5194 5195 q_handle = vsi->tx_rings[queue_index]->q_handle; 5196 tc = ice_dcb_get_tc(vsi, queue_index); 5197 5198 /* Set BW back to default, when user set maxrate to 0 */ 5199 if (!maxrate) 5200 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 5201 q_handle, ICE_MAX_BW); 5202 else 5203 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 5204 q_handle, ICE_MAX_BW, maxrate * 1000); 5205 if (status) { 5206 netdev_err(netdev, "Unable to set Tx max rate, error %s\n", 5207 ice_stat_str(status)); 5208 return -EIO; 5209 } 5210 5211 return 0; 5212 } 5213 5214 /** 5215 * ice_fdb_add - add an entry to the hardware database 5216 * @ndm: the input from the stack 5217 * @tb: pointer to array of nladdr (unused) 5218 * @dev: the net device pointer 5219 * @addr: the MAC address entry being added 5220 * @vid: VLAN ID 5221 * @flags: instructions from stack about fdb operation 5222 * @extack: netlink extended ack 5223 */ 5224 static int 5225 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 5226 struct net_device *dev, const unsigned char *addr, u16 vid, 5227 u16 flags, struct netlink_ext_ack __always_unused *extack) 5228 { 5229 int err; 5230 5231 if (vid) { 5232 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 5233 return -EINVAL; 5234 } 5235 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 5236 netdev_err(dev, "FDB only supports static addresses\n"); 5237 return -EINVAL; 5238 } 5239 5240 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 5241 err = dev_uc_add_excl(dev, addr); 5242 else if (is_multicast_ether_addr(addr)) 5243 err = dev_mc_add_excl(dev, addr); 5244 else 5245 err = -EINVAL; 5246 5247 /* Only return duplicate errors if NLM_F_EXCL is set */ 5248 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 5249 err = 0; 5250 5251 return err; 5252 } 5253 5254 /** 5255 * ice_fdb_del - delete an entry from the hardware database 5256 * 
@ndm: the input from the stack 5257 * @tb: pointer to array of nladdr (unused) 5258 * @dev: the net device pointer 5259 * @addr: the MAC address entry being removed 5260 * @vid: VLAN ID 5261 */ 5262 static int 5263 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 5264 struct net_device *dev, const unsigned char *addr, 5265 __always_unused u16 vid) 5266 { 5267 int err; 5268 5269 if (ndm->ndm_state & NUD_PERMANENT) { 5270 netdev_err(dev, "FDB only supports static addresses\n"); 5271 return -EINVAL; 5272 } 5273 5274 if (is_unicast_ether_addr(addr)) 5275 err = dev_uc_del(dev, addr); 5276 else if (is_multicast_ether_addr(addr)) 5277 err = dev_mc_del(dev, addr); 5278 else 5279 err = -EINVAL; 5280 5281 return err; 5282 } 5283 5284 /** 5285 * ice_set_features - set the netdev feature flags 5286 * @netdev: ptr to the netdev being adjusted 5287 * @features: the feature set that the stack is suggesting 5288 */ 5289 static int 5290 ice_set_features(struct net_device *netdev, netdev_features_t features) 5291 { 5292 struct ice_netdev_priv *np = netdev_priv(netdev); 5293 struct ice_vsi *vsi = np->vsi; 5294 struct ice_pf *pf = vsi->back; 5295 int ret = 0; 5296 5297 /* Don't set any netdev advanced features with device in Safe Mode */ 5298 if (ice_is_safe_mode(vsi->back)) { 5299 dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); 5300 return ret; 5301 } 5302 5303 /* Do not change setting during reset */ 5304 if (ice_is_reset_in_progress(pf->state)) { 5305 dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 5306 return -EBUSY; 5307 } 5308 5309 /* Multiple features can be changed in one call so keep features in 5310 * separate if/else statements to guarantee each feature is checked 5311 */ 5312 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 5313 ice_vsi_manage_rss_lut(vsi, true); 5314 else if (!(features & NETIF_F_RXHASH) && 5315 netdev->features & NETIF_F_RXHASH) 5316 ice_vsi_manage_rss_lut(vsi, false); 5317 5318 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && 5319 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 5320 ret = ice_vsi_manage_vlan_stripping(vsi, true); 5321 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && 5322 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 5323 ret = ice_vsi_manage_vlan_stripping(vsi, false); 5324 5325 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && 5326 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 5327 ret = ice_vsi_manage_vlan_insertion(vsi); 5328 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && 5329 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 5330 ret = ice_vsi_manage_vlan_insertion(vsi); 5331 5332 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 5333 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 5334 ret = ice_cfg_vlan_pruning(vsi, true, false); 5335 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 5336 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 5337 ret = ice_cfg_vlan_pruning(vsi, false, false); 5338 5339 if ((features & NETIF_F_NTUPLE) && 5340 !(netdev->features & NETIF_F_NTUPLE)) { 5341 ice_vsi_manage_fdir(vsi, true); 5342 ice_init_arfs(vsi); 5343 } else if (!(features & NETIF_F_NTUPLE) && 5344 (netdev->features & NETIF_F_NTUPLE)) { 5345 ice_vsi_manage_fdir(vsi, false); 5346 ice_clear_arfs(vsi); 5347 } 5348 5349 return ret; 5350 } 5351 5352 /** 5353 * ice_vsi_vlan_setup - Set up VLAN offload properties on a VSI 5354 * @vsi: VSI to set up VLAN properties for 5355 */ 5356 static int
ice_vsi_vlan_setup(struct ice_vsi *vsi) 5357 { 5358 int ret = 0; 5359 5360 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 5361 ret = ice_vsi_manage_vlan_stripping(vsi, true); 5362 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 5363 ret = ice_vsi_manage_vlan_insertion(vsi); 5364 5365 return ret; 5366 } 5367 5368 /** 5369 * ice_vsi_cfg - Set up the VSI 5370 * @vsi: the VSI being configured 5371 * 5372 * Return 0 on success and negative value on error 5373 */ 5374 int ice_vsi_cfg(struct ice_vsi *vsi) 5375 { 5376 int err; 5377 5378 if (vsi->netdev) { 5379 ice_set_rx_mode(vsi->netdev); 5380 5381 err = ice_vsi_vlan_setup(vsi); 5382 5383 if (err) 5384 return err; 5385 } 5386 ice_vsi_cfg_dcb_rings(vsi); 5387 5388 err = ice_vsi_cfg_lan_txqs(vsi); 5389 if (!err && ice_is_xdp_ena_vsi(vsi)) 5390 err = ice_vsi_cfg_xdp_txqs(vsi); 5391 if (!err) 5392 err = ice_vsi_cfg_rxqs(vsi); 5393 5394 return err; 5395 } 5396 5397 /* THEORY OF MODERATION: 5398 * The below code creates custom DIM profiles for use by this driver, because 5399 * the ice driver hardware works differently than the hardware that DIMLIB was 5400 * originally made for. ice hardware doesn't have packet count limits that 5401 * can trigger an interrupt, but it *does* have interrupt rate limit support, 5402 * and this code adds that capability to be used by the driver when it's using 5403 * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver 5404 * for how to "respond" to traffic and interrupts, so this driver uses a 5405 * slightly different set of moderation parameters to get best performance. 5406 */ 5407 struct ice_dim { 5408 /* the throttle rate for interrupts, basically worst case delay before 5409 * an initial interrupt fires, value is stored in microseconds. 5410 */ 5411 u16 itr; 5412 /* the rate limit for interrupts, which can cap a delay from a small 5413 * ITR at a certain number of interrupts per second. For example, a 2us 5414 * ITR could yield as much as 500,000 interrupts per second, but with a 5415 * 10us rate limit it is capped at 100,000 interrupts per second. Value 5416 * is stored in microseconds. 5417 */ 5418 u16 intrl; 5419 }; 5420 5421 /* Make a different profile for Rx that doesn't allow quite so aggressive 5422 * moderation at the high end (it maxes out at 128us, or about 8k interrupts a 5423 * second). The INTRL/rate parameters here are only useful to cap small ITR 5424 * values, which is why for larger ITRs - like 128, which can only generate 5425 * 8k interrupts per second, there is no point to rate limit and the values 5426 * are set to zero. The rate limit values do affect latency, and so must 5427 * be reasonably small so as not to impact latency-sensitive tests.
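* As a worked example of the arithmetic from the struct comment above: the first Rx entry below, {2, 10}, pairs a 2us ITR (on its own up to ~500,000 interrupts per second) with a 10us rate limit, capping the vector at ~100,000 interrupts per second.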
5428 */ 5429 static const struct ice_dim rx_profile[] = { 5430 {2, 10}, 5431 {8, 16}, 5432 {32, 0}, 5433 {96, 0}, 5434 {128, 0} 5435 }; 5436 5437 /* The transmit profile, which has the same sorts of values 5438 * as the previous struct 5439 */ 5440 static const struct ice_dim tx_profile[] = { 5441 {2, 10}, 5442 {8, 16}, 5443 {64, 0}, 5444 {128, 0}, 5445 {256, 0} 5446 }; 5447 5448 static void ice_tx_dim_work(struct work_struct *work) 5449 { 5450 struct ice_ring_container *rc; 5451 struct ice_q_vector *q_vector; 5452 struct dim *dim; 5453 u16 itr, intrl; 5454 5455 dim = container_of(work, struct dim, work); 5456 rc = container_of(dim, struct ice_ring_container, dim); 5457 q_vector = container_of(rc, struct ice_q_vector, tx); 5458 5459 if (dim->profile_ix >= ARRAY_SIZE(tx_profile)) 5460 dim->profile_ix = ARRAY_SIZE(tx_profile) - 1; 5461 5462 /* look up the values in our local table */ 5463 itr = tx_profile[dim->profile_ix].itr; 5464 intrl = tx_profile[dim->profile_ix].intrl; 5465 5466 ice_write_itr(rc, itr); 5467 ice_write_intrl(q_vector, intrl); 5468 5469 dim->state = DIM_START_MEASURE; 5470 } 5471 5472 static void ice_rx_dim_work(struct work_struct *work) 5473 { 5474 struct ice_ring_container *rc; 5475 struct ice_q_vector *q_vector; 5476 struct dim *dim; 5477 u16 itr, intrl; 5478 5479 dim = container_of(work, struct dim, work); 5480 rc = container_of(dim, struct ice_ring_container, dim); 5481 q_vector = container_of(rc, struct ice_q_vector, rx); 5482 5483 if (dim->profile_ix >= ARRAY_SIZE(rx_profile)) 5484 dim->profile_ix = ARRAY_SIZE(rx_profile) - 1; 5485 5486 /* look up the values in our local table */ 5487 itr = rx_profile[dim->profile_ix].itr; 5488 intrl = rx_profile[dim->profile_ix].intrl; 5489 5490 ice_write_itr(rc, itr); 5491 ice_write_intrl(q_vector, intrl); 5492 5493 dim->state = DIM_START_MEASURE; 5494 } 5495 5496 /** 5497 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 5498 * @vsi: the VSI being configured 5499 */ 5500 static void ice_napi_enable_all(struct ice_vsi *vsi) 5501 { 5502 int q_idx; 5503 5504 if (!vsi->netdev) 5505 return; 5506 5507 ice_for_each_q_vector(vsi, q_idx) { 5508 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 5509 5510 INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work); 5511 q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 5512 5513 INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work); 5514 q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 5515 5516 if (q_vector->rx.ring || q_vector->tx.ring) 5517 napi_enable(&q_vector->napi); 5518 } 5519 } 5520 5521 /** 5522 * ice_up_complete - Finish the last steps of bringing up a connection 5523 * @vsi: The VSI being configured 5524 * 5525 * Return 0 on success and negative value on error 5526 */ 5527 static int ice_up_complete(struct ice_vsi *vsi) 5528 { 5529 struct ice_pf *pf = vsi->back; 5530 int err; 5531 5532 ice_vsi_cfg_msix(vsi); 5533 5534 /* Enable only Rx rings, Tx rings were enabled by the FW when the 5535 * Tx queue group list was configured and the context bits were 5536 * programmed using ice_vsi_cfg_txqs 5537 */ 5538 err = ice_vsi_start_all_rx_rings(vsi); 5539 if (err) 5540 return err; 5541 5542 clear_bit(ICE_VSI_DOWN, vsi->state); 5543 ice_napi_enable_all(vsi); 5544 ice_vsi_ena_irq(vsi); 5545 5546 if (vsi->port_info && 5547 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 5548 vsi->netdev) { 5549 ice_print_link_msg(vsi, true); 5550 netif_tx_start_all_queues(vsi->netdev); 5551 netif_carrier_on(vsi->netdev); 5552 } 5553 5554 ice_service_task_schedule(pf); 
5555 5556 return 0; 5557 } 5558 5559 /** 5560 * ice_up - Bring the connection back up after being down 5561 * @vsi: VSI being configured 5562 */ 5563 int ice_up(struct ice_vsi *vsi) 5564 { 5565 int err; 5566 5567 err = ice_vsi_cfg(vsi); 5568 if (!err) 5569 err = ice_up_complete(vsi); 5570 5571 return err; 5572 } 5573 5574 /** 5575 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 5576 * @ring: Tx or Rx ring to read stats from 5577 * @pkts: packets stats counter 5578 * @bytes: bytes stats counter 5579 * 5580 * This function fetches stats from the ring considering the atomic operations 5581 * that need to be performed to read u64 values on 32-bit machines. 5582 */ 5583 static void 5584 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) 5585 { 5586 unsigned int start; 5587 *pkts = 0; 5588 *bytes = 0; 5589 5590 if (!ring) 5591 return; 5592 do { 5593 start = u64_stats_fetch_begin_irq(&ring->syncp); 5594 *pkts = ring->stats.pkts; 5595 *bytes = ring->stats.bytes; 5596 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 5597 } 5598 5599 /** 5600 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters 5601 * @vsi: the VSI to be updated 5602 * @rings: rings to work on 5603 * @count: number of rings 5604 */ 5605 static void 5606 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, 5607 u16 count) 5608 { 5609 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 5610 u16 i; 5611 5612 for (i = 0; i < count; i++) { 5613 struct ice_ring *ring; 5614 u64 pkts, bytes; 5615 5616 ring = READ_ONCE(rings[i]); 5617 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 5618 vsi_stats->tx_packets += pkts; 5619 vsi_stats->tx_bytes += bytes; 5620 vsi->tx_restart += ring->tx_stats.restart_q; 5621 vsi->tx_busy += ring->tx_stats.tx_busy; 5622 vsi->tx_linearize += ring->tx_stats.tx_linearize; 5623 } 5624 } 5625 5626 /** 5627 * ice_update_vsi_ring_stats - Update VSI stats counters 5628 * @vsi: the VSI to be updated 5629 */ 5630 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 5631 { 5632 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 5633 u64 pkts, bytes; 5634 int i; 5635 5636 /* reset netdev stats */ 5637 vsi_stats->tx_packets = 0; 5638 vsi_stats->tx_bytes = 0; 5639 vsi_stats->rx_packets = 0; 5640 vsi_stats->rx_bytes = 0; 5641 5642 /* reset non-netdev (extended) stats */ 5643 vsi->tx_restart = 0; 5644 vsi->tx_busy = 0; 5645 vsi->tx_linearize = 0; 5646 vsi->rx_buf_failed = 0; 5647 vsi->rx_page_failed = 0; 5648 5649 rcu_read_lock(); 5650 5651 /* update Tx rings counters */ 5652 ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); 5653 5654 /* update Rx rings counters */ 5655 ice_for_each_rxq(vsi, i) { 5656 struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]); 5657 5658 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 5659 vsi_stats->rx_packets += pkts; 5660 vsi_stats->rx_bytes += bytes; 5661 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 5662 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; 5663 } 5664 5665 /* update XDP Tx rings counters */ 5666 if (ice_is_xdp_ena_vsi(vsi)) 5667 ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, 5668 vsi->num_xdp_txq); 5669 5670 rcu_read_unlock(); 5671 } 5672 5673 /** 5674 * ice_update_vsi_stats - Update VSI stats counters 5675 * @vsi: the VSI to be updated 5676 */ 5677 void ice_update_vsi_stats(struct ice_vsi *vsi) 5678 { 5679 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 5680 struct ice_eth_stats *cur_es = &vsi->eth_stats; 5681 struct ice_pf *pf =
vsi->back; 5682 5683 if (test_bit(ICE_VSI_DOWN, vsi->state) || 5684 test_bit(ICE_CFG_BUSY, pf->state)) 5685 return; 5686 5687 /* get stats as recorded by Tx/Rx rings */ 5688 ice_update_vsi_ring_stats(vsi); 5689 5690 /* get VSI stats as recorded by the hardware */ 5691 ice_update_eth_stats(vsi); 5692 5693 cur_ns->tx_errors = cur_es->tx_errors; 5694 cur_ns->rx_dropped = cur_es->rx_discards; 5695 cur_ns->tx_dropped = cur_es->tx_discards; 5696 cur_ns->multicast = cur_es->rx_multicast; 5697 5698 /* update some more netdev stats if this is main VSI */ 5699 if (vsi->type == ICE_VSI_PF) { 5700 cur_ns->rx_crc_errors = pf->stats.crc_errors; 5701 cur_ns->rx_errors = pf->stats.crc_errors + 5702 pf->stats.illegal_bytes + 5703 pf->stats.rx_len_errors + 5704 pf->stats.rx_undersize + 5705 pf->hw_csum_rx_error + 5706 pf->stats.rx_jabber + 5707 pf->stats.rx_fragments + 5708 pf->stats.rx_oversize; 5709 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 5710 /* record drops from the port level */ 5711 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 5712 } 5713 } 5714 5715 /** 5716 * ice_update_pf_stats - Update PF port stats counters 5717 * @pf: PF whose stats need to be updated 5718 */ 5719 void ice_update_pf_stats(struct ice_pf *pf) 5720 { 5721 struct ice_hw_port_stats *prev_ps, *cur_ps; 5722 struct ice_hw *hw = &pf->hw; 5723 u16 fd_ctr_base; 5724 u8 port; 5725 5726 port = hw->port_info->lport; 5727 prev_ps = &pf->stats_prev; 5728 cur_ps = &pf->stats; 5729 5730 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 5731 &prev_ps->eth.rx_bytes, 5732 &cur_ps->eth.rx_bytes); 5733 5734 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 5735 &prev_ps->eth.rx_unicast, 5736 &cur_ps->eth.rx_unicast); 5737 5738 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 5739 &prev_ps->eth.rx_multicast, 5740 &cur_ps->eth.rx_multicast); 5741 5742 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 5743 &prev_ps->eth.rx_broadcast, 5744 &cur_ps->eth.rx_broadcast); 5745 5746 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 5747 &prev_ps->eth.rx_discards, 5748 &cur_ps->eth.rx_discards); 5749 5750 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 5751 &prev_ps->eth.tx_bytes, 5752 &cur_ps->eth.tx_bytes); 5753 5754 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 5755 &prev_ps->eth.tx_unicast, 5756 &cur_ps->eth.tx_unicast); 5757 5758 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 5759 &prev_ps->eth.tx_multicast, 5760 &cur_ps->eth.tx_multicast); 5761 5762 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 5763 &prev_ps->eth.tx_broadcast, 5764 &cur_ps->eth.tx_broadcast); 5765 5766 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 5767 &prev_ps->tx_dropped_link_down, 5768 &cur_ps->tx_dropped_link_down); 5769 5770 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 5771 &prev_ps->rx_size_64, &cur_ps->rx_size_64); 5772 5773 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 5774 &prev_ps->rx_size_127, &cur_ps->rx_size_127); 5775 5776 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 5777 &prev_ps->rx_size_255, &cur_ps->rx_size_255); 5778 5779 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 5780 &prev_ps->rx_size_511, &cur_ps->rx_size_511); 5781 5782 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 5783 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 5784 5785 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 5786
&prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 5787 5788 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 5789 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 5790 5791 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 5792 &prev_ps->tx_size_64, &cur_ps->tx_size_64); 5793 5794 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 5795 &prev_ps->tx_size_127, &cur_ps->tx_size_127); 5796 5797 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 5798 &prev_ps->tx_size_255, &cur_ps->tx_size_255); 5799 5800 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 5801 &prev_ps->tx_size_511, &cur_ps->tx_size_511); 5802 5803 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 5804 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 5805 5806 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 5807 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 5808 5809 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 5810 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 5811 5812 fd_ctr_base = hw->fd_ctr_base; 5813 5814 ice_stat_update40(hw, 5815 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 5816 pf->stat_prev_loaded, &prev_ps->fd_sb_match, 5817 &cur_ps->fd_sb_match); 5818 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 5819 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 5820 5821 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 5822 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 5823 5824 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 5825 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 5826 5827 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 5828 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 5829 5830 ice_update_dcb_stats(pf); 5831 5832 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 5833 &prev_ps->crc_errors, &cur_ps->crc_errors); 5834 5835 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 5836 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 5837 5838 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 5839 &prev_ps->mac_local_faults, 5840 &cur_ps->mac_local_faults); 5841 5842 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 5843 &prev_ps->mac_remote_faults, 5844 &cur_ps->mac_remote_faults); 5845 5846 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 5847 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 5848 5849 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 5850 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 5851 5852 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 5853 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 5854 5855 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 5856 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 5857 5858 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 5859 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 5860 5861 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 
1 : 0; 5862 5863 pf->stat_prev_loaded = true; 5864 } 5865 5866 /** 5867 * ice_get_stats64 - get statistics for network device structure 5868 * @netdev: network interface device structure 5869 * @stats: main device statistics structure 5870 */ 5871 static 5872 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 5873 { 5874 struct ice_netdev_priv *np = netdev_priv(netdev); 5875 struct rtnl_link_stats64 *vsi_stats; 5876 struct ice_vsi *vsi = np->vsi; 5877 5878 vsi_stats = &vsi->net_stats; 5879 5880 if (!vsi->num_txq || !vsi->num_rxq) 5881 return; 5882 5883 /* netdev packet/byte stats come from ring counters. These are obtained 5884 * by summing up ring counters (done by ice_update_vsi_ring_stats). 5885 * But, only call the update routine and read the registers if VSI is 5886 * not down. 5887 */ 5888 if (!test_bit(ICE_VSI_DOWN, vsi->state)) 5889 ice_update_vsi_ring_stats(vsi); 5890 stats->tx_packets = vsi_stats->tx_packets; 5891 stats->tx_bytes = vsi_stats->tx_bytes; 5892 stats->rx_packets = vsi_stats->rx_packets; 5893 stats->rx_bytes = vsi_stats->rx_bytes; 5894 5895 /* The rest of the stats can be read from the hardware but instead we 5896 * just return values that the watchdog task has already obtained from 5897 * the hardware. 5898 */ 5899 stats->multicast = vsi_stats->multicast; 5900 stats->tx_errors = vsi_stats->tx_errors; 5901 stats->tx_dropped = vsi_stats->tx_dropped; 5902 stats->rx_errors = vsi_stats->rx_errors; 5903 stats->rx_dropped = vsi_stats->rx_dropped; 5904 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 5905 stats->rx_length_errors = vsi_stats->rx_length_errors; 5906 } 5907 5908 /** 5909 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 5910 * @vsi: VSI having NAPI disabled 5911 */ 5912 static void ice_napi_disable_all(struct ice_vsi *vsi) 5913 { 5914 int q_idx; 5915 5916 if (!vsi->netdev) 5917 return; 5918 5919 ice_for_each_q_vector(vsi, q_idx) { 5920 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 5921 5922 if (q_vector->rx.ring || q_vector->tx.ring) 5923 napi_disable(&q_vector->napi); 5924 5925 cancel_work_sync(&q_vector->tx.dim.work); 5926 cancel_work_sync(&q_vector->rx.dim.work); 5927 } 5928 } 5929 5930 /** 5931 * ice_down - Shutdown the connection 5932 * @vsi: The VSI being stopped 5933 */ 5934 int ice_down(struct ice_vsi *vsi) 5935 { 5936 int i, tx_err, rx_err, link_err = 0; 5937 5938 /* Caller of this function is expected to set the 5939 * vsi->state ICE_VSI_DOWN bit 5940 */ 5941 if (vsi->netdev) { 5942 netif_carrier_off(vsi->netdev); 5943 netif_tx_disable(vsi->netdev); 5944 } 5945 5946 ice_vsi_dis_irq(vsi); 5947 5948 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 5949 if (tx_err) 5950 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", 5951 vsi->vsi_num, tx_err); 5952 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { 5953 tx_err = ice_vsi_stop_xdp_tx_rings(vsi); 5954 if (tx_err) 5955 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", 5956 vsi->vsi_num, tx_err); 5957 } 5958 5959 rx_err = ice_vsi_stop_all_rx_rings(vsi); 5960 if (rx_err) 5961 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", 5962 vsi->vsi_num, rx_err); 5963 5964 ice_napi_disable_all(vsi); 5965 5966 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { 5967 link_err = ice_force_phys_link_state(vsi, false); 5968 if (link_err) 5969 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", 5970 vsi->vsi_num, link_err); 5971 } 5972 5973 ice_for_each_txq(vsi, i) 5974
ice_clean_tx_ring(vsi->tx_rings[i]); 5975 5976 ice_for_each_rxq(vsi, i) 5977 ice_clean_rx_ring(vsi->rx_rings[i]); 5978 5979 if (tx_err || rx_err || link_err) { 5980 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 5981 vsi->vsi_num, vsi->vsw->sw_id); 5982 return -EIO; 5983 } 5984 5985 return 0; 5986 } 5987 5988 /** 5989 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 5990 * @vsi: VSI having resources allocated 5991 * 5992 * Return 0 on success, negative on failure 5993 */ 5994 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 5995 { 5996 int i, err = 0; 5997 5998 if (!vsi->num_txq) { 5999 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 6000 vsi->vsi_num); 6001 return -EINVAL; 6002 } 6003 6004 ice_for_each_txq(vsi, i) { 6005 struct ice_ring *ring = vsi->tx_rings[i]; 6006 6007 if (!ring) 6008 return -EINVAL; 6009 6010 ring->netdev = vsi->netdev; 6011 err = ice_setup_tx_ring(ring); 6012 if (err) 6013 break; 6014 } 6015 6016 return err; 6017 } 6018 6019 /** 6020 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 6021 * @vsi: VSI having resources allocated 6022 * 6023 * Return 0 on success, negative on failure 6024 */ 6025 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 6026 { 6027 int i, err = 0; 6028 6029 if (!vsi->num_rxq) { 6030 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 6031 vsi->vsi_num); 6032 return -EINVAL; 6033 } 6034 6035 ice_for_each_rxq(vsi, i) { 6036 struct ice_ring *ring = vsi->rx_rings[i]; 6037 6038 if (!ring) 6039 return -EINVAL; 6040 6041 ring->netdev = vsi->netdev; 6042 err = ice_setup_rx_ring(ring); 6043 if (err) 6044 break; 6045 } 6046 6047 return err; 6048 } 6049 6050 /** 6051 * ice_vsi_open_ctrl - open control VSI for use 6052 * @vsi: the VSI to open 6053 * 6054 * Initialization of the Control VSI 6055 * 6056 * Returns 0 on success, negative value on error 6057 */ 6058 int ice_vsi_open_ctrl(struct ice_vsi *vsi) 6059 { 6060 char int_name[ICE_INT_NAME_STR_LEN]; 6061 struct ice_pf *pf = vsi->back; 6062 struct device *dev; 6063 int err; 6064 6065 dev = ice_pf_to_dev(pf); 6066 /* allocate descriptors */ 6067 err = ice_vsi_setup_tx_rings(vsi); 6068 if (err) 6069 goto err_setup_tx; 6070 6071 err = ice_vsi_setup_rx_rings(vsi); 6072 if (err) 6073 goto err_setup_rx; 6074 6075 err = ice_vsi_cfg(vsi); 6076 if (err) 6077 goto err_setup_rx; 6078 6079 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 6080 dev_driver_string(dev), dev_name(dev)); 6081 err = ice_vsi_req_irq_msix(vsi, int_name); 6082 if (err) 6083 goto err_setup_rx; 6084 6085 ice_vsi_cfg_msix(vsi); 6086 6087 err = ice_vsi_start_all_rx_rings(vsi); 6088 if (err) 6089 goto err_up_complete; 6090 6091 clear_bit(ICE_VSI_DOWN, vsi->state); 6092 ice_vsi_ena_irq(vsi); 6093 6094 return 0; 6095 6096 err_up_complete: 6097 ice_down(vsi); 6098 err_setup_rx: 6099 ice_vsi_free_rx_rings(vsi); 6100 err_setup_tx: 6101 ice_vsi_free_tx_rings(vsi); 6102 6103 return err; 6104 } 6105 6106 /** 6107 * ice_vsi_open - Called when a network interface is made active 6108 * @vsi: the VSI to open 6109 * 6110 * Initialization of the VSI 6111 * 6112 * Returns 0 on success, negative value on error 6113 */ 6114 static int ice_vsi_open(struct ice_vsi *vsi) 6115 { 6116 char int_name[ICE_INT_NAME_STR_LEN]; 6117 struct ice_pf *pf = vsi->back; 6118 int err; 6119 6120 /* allocate descriptors */ 6121 err = ice_vsi_setup_tx_rings(vsi); 6122 if (err) 6123 goto err_setup_tx; 6124 6125 err = ice_vsi_setup_rx_rings(vsi); 6126 if (err) 6127 goto err_setup_rx; 6128 6129 err = ice_vsi_cfg(vsi); 
6130 if (err) 6131 goto err_setup_rx; 6132 6133 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 6134 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 6135 err = ice_vsi_req_irq_msix(vsi, int_name); 6136 if (err) 6137 goto err_setup_rx; 6138 6139 /* Notify the stack of the actual queue counts. */ 6140 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 6141 if (err) 6142 goto err_set_qs; 6143 6144 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 6145 if (err) 6146 goto err_set_qs; 6147 6148 err = ice_up_complete(vsi); 6149 if (err) 6150 goto err_up_complete; 6151 6152 return 0; 6153 6154 err_up_complete: 6155 ice_down(vsi); 6156 err_set_qs: 6157 ice_vsi_free_irq(vsi); 6158 err_setup_rx: 6159 ice_vsi_free_rx_rings(vsi); 6160 err_setup_tx: 6161 ice_vsi_free_tx_rings(vsi); 6162 6163 return err; 6164 } 6165 6166 /** 6167 * ice_vsi_release_all - Delete all VSIs 6168 * @pf: PF from which all VSIs are being removed 6169 */ 6170 static void ice_vsi_release_all(struct ice_pf *pf) 6171 { 6172 int err, i; 6173 6174 if (!pf->vsi) 6175 return; 6176 6177 ice_for_each_vsi(pf, i) { 6178 if (!pf->vsi[i]) 6179 continue; 6180 6181 err = ice_vsi_release(pf->vsi[i]); 6182 if (err) 6183 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 6184 i, err, pf->vsi[i]->vsi_num); 6185 } 6186 } 6187 6188 /** 6189 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 6190 * @pf: pointer to the PF instance 6191 * @type: VSI type to rebuild 6192 * 6193 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 6194 */ 6195 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 6196 { 6197 struct device *dev = ice_pf_to_dev(pf); 6198 enum ice_status status; 6199 int i, err; 6200 6201 ice_for_each_vsi(pf, i) { 6202 struct ice_vsi *vsi = pf->vsi[i]; 6203 6204 if (!vsi || vsi->type != type) 6205 continue; 6206 6207 /* rebuild the VSI */ 6208 err = ice_vsi_rebuild(vsi, true); 6209 if (err) { 6210 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", 6211 err, vsi->idx, ice_vsi_type_str(type)); 6212 return err; 6213 } 6214 6215 /* replay filters for the VSI */ 6216 status = ice_replay_vsi(&pf->hw, vsi->idx); 6217 if (status) { 6218 dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", 6219 ice_stat_str(status), vsi->idx, 6220 ice_vsi_type_str(type)); 6221 return -EIO; 6222 } 6223 6224 /* Re-map HW VSI number, using VSI handle that has been 6225 * previously validated in ice_replay_vsi() call above 6226 */ 6227 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 6228 6229 /* enable the VSI */ 6230 err = ice_ena_vsi(vsi, false); 6231 if (err) { 6232 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", 6233 err, vsi->idx, ice_vsi_type_str(type)); 6234 return err; 6235 } 6236 6237 dev_info(dev, "VSI rebuilt. 
VSI index %d, type %s\n", vsi->idx, 6238 ice_vsi_type_str(type)); 6239 } 6240 6241 return 0; 6242 } 6243 6244 /** 6245 * ice_update_pf_netdev_link - Update PF netdev link status 6246 * @pf: pointer to the PF instance 6247 */ 6248 static void ice_update_pf_netdev_link(struct ice_pf *pf) 6249 { 6250 bool link_up; 6251 int i; 6252 6253 ice_for_each_vsi(pf, i) { 6254 struct ice_vsi *vsi = pf->vsi[i]; 6255 6256 if (!vsi || vsi->type != ICE_VSI_PF) 6257 return; 6258 6259 ice_get_link_status(pf->vsi[i]->port_info, &link_up); 6260 if (link_up) { 6261 netif_carrier_on(pf->vsi[i]->netdev); 6262 netif_tx_wake_all_queues(pf->vsi[i]->netdev); 6263 } else { 6264 netif_carrier_off(pf->vsi[i]->netdev); 6265 netif_tx_stop_all_queues(pf->vsi[i]->netdev); 6266 } 6267 } 6268 } 6269 6270 /** 6271 * ice_rebuild - rebuild after reset 6272 * @pf: PF to rebuild 6273 * @reset_type: type of reset 6274 * 6275 * Do not rebuild VF VSI in this flow because that is already handled via 6276 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a 6277 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want 6278 * to reset/rebuild all the VF VSIs twice. 6279 */ 6280 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 6281 { 6282 struct device *dev = ice_pf_to_dev(pf); 6283 struct ice_hw *hw = &pf->hw; 6284 enum ice_status ret; 6285 int err; 6286 6287 if (test_bit(ICE_DOWN, pf->state)) 6288 goto clear_recovery; 6289 6290 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); 6291 6292 ret = ice_init_all_ctrlq(hw); 6293 if (ret) { 6294 dev_err(dev, "control queues init failed %s\n", 6295 ice_stat_str(ret)); 6296 goto err_init_ctrlq; 6297 } 6298 6299 /* if DDP was previously loaded successfully */ 6300 if (!ice_is_safe_mode(pf)) { 6301 /* reload the SW DB of filter tables */ 6302 if (reset_type == ICE_RESET_PFR) 6303 ice_fill_blk_tbls(hw); 6304 else 6305 /* Reload DDP Package after CORER/GLOBR reset */ 6306 ice_load_pkg(NULL, pf); 6307 } 6308 6309 ret = ice_clear_pf_cfg(hw); 6310 if (ret) { 6311 dev_err(dev, "clear PF configuration failed %s\n", 6312 ice_stat_str(ret)); 6313 goto err_init_ctrlq; 6314 } 6315 6316 if (pf->first_sw->dflt_vsi_ena) 6317 dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); 6318 /* clear the default VSI configuration if it exists */ 6319 pf->first_sw->dflt_vsi = NULL; 6320 pf->first_sw->dflt_vsi_ena = false; 6321 6322 ice_clear_pxe_mode(hw); 6323 6324 ret = ice_init_nvm(hw); 6325 if (ret) { 6326 dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret)); 6327 goto err_init_ctrlq; 6328 } 6329 6330 ret = ice_get_caps(hw); 6331 if (ret) { 6332 dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); 6333 goto err_init_ctrlq; 6334 } 6335 6336 ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 6337 if (ret) { 6338 dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); 6339 goto err_init_ctrlq; 6340 } 6341 6342 err = ice_sched_init_port(hw->port_info); 6343 if (err) 6344 goto err_sched_init_port; 6345 6346 /* start misc vector */ 6347 err = ice_req_irq_msix_misc(pf); 6348 if (err) { 6349 dev_err(dev, "misc vector setup failed: %d\n", err); 6350 goto err_sched_init_port; 6351 } 6352 6353 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 6354 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 6355 if (!rd32(hw, PFQF_FD_SIZE)) { 6356 u16 unused, guar, b_effort; 6357 6358 guar = hw->func_caps.fd_fltr_guar; 6359 b_effort = hw->func_caps.fd_fltr_best_effort; 6360 6361 /* force guaranteed filter pool
for PF */ 6362 ice_alloc_fd_guar_item(hw, &unused, guar); 6363 /* force shared filter pool for PF */ 6364 ice_alloc_fd_shrd_item(hw, &unused, b_effort); 6365 } 6366 } 6367 6368 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 6369 ice_dcb_rebuild(pf); 6370 6371 /* If the PF previously had PTP enabled, PTP init needs to happen before 6372 * the VSI rebuild; otherwise, the PTP link status events will 6373 * fail. 6374 */ 6375 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 6376 ice_ptp_init(pf); 6377 6378 /* rebuild PF VSI */ 6379 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 6380 if (err) { 6381 dev_err(dev, "PF VSI rebuild failed: %d\n", err); 6382 goto err_vsi_rebuild; 6383 } 6384 6385 /* If Flow Director is active */ 6386 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 6387 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 6388 if (err) { 6389 dev_err(dev, "control VSI rebuild failed: %d\n", err); 6390 goto err_vsi_rebuild; 6391 } 6392 6393 /* replay HW Flow Director recipes */ 6394 if (hw->fdir_prof) 6395 ice_fdir_replay_flows(hw); 6396 6397 /* replay Flow Director filters */ 6398 ice_fdir_replay_fltrs(pf); 6399 6400 ice_rebuild_arfs(pf); 6401 } 6402 6403 ice_update_pf_netdev_link(pf); 6404 6405 /* tell the firmware we are up */ 6406 ret = ice_send_version(pf); 6407 if (ret) { 6408 dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", 6409 ice_stat_str(ret)); 6410 goto err_vsi_rebuild; 6411 } 6412 6413 ice_replay_post(hw); 6414 6415 /* if we get here, reset flow is successful */ 6416 clear_bit(ICE_RESET_FAILED, pf->state); 6417 6418 ice_plug_aux_dev(pf); 6419 return; 6420 6421 err_vsi_rebuild: 6422 err_sched_init_port: 6423 ice_sched_cleanup_all(hw); 6424 err_init_ctrlq: 6425 ice_shutdown_all_ctrlq(hw); 6426 set_bit(ICE_RESET_FAILED, pf->state); 6427 clear_recovery: 6428 /* set this bit in PF state to control service task scheduling */ 6429 set_bit(ICE_NEEDS_RESTART, pf->state); 6430 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 6431 } 6432 6433 /** 6434 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP 6435 * @vsi: Pointer to VSI structure 6436 */ 6437 static int ice_max_xdp_frame_size(struct ice_vsi *vsi) 6438 { 6439 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) 6440 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; 6441 else 6442 return ICE_RXBUF_3072; 6443 } 6444 6445 /** 6446 * ice_change_mtu - NDO callback to change the MTU 6447 * @netdev: network interface device structure 6448 * @new_mtu: new value for maximum frame size 6449 * 6450 * Returns 0 on success, negative on failure 6451 */ 6452 static int ice_change_mtu(struct net_device *netdev, int new_mtu) 6453 { 6454 struct ice_netdev_priv *np = netdev_priv(netdev); 6455 struct ice_vsi *vsi = np->vsi; 6456 struct ice_pf *pf = vsi->back; 6457 struct iidc_event *event; 6458 u8 count = 0; 6459 int err = 0; 6460 6461 if (new_mtu == (int)netdev->mtu) { 6462 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 6463 return 0; 6464 } 6465 6466 if (ice_is_xdp_ena_vsi(vsi)) { 6467 int frame_size = ice_max_xdp_frame_size(vsi); 6468 6469 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 6470 netdev_err(netdev, "max MTU for XDP usage is %d\n", 6471 frame_size - ICE_ETH_PKT_HDR_PAD); 6472 return -EINVAL; 6473 } 6474 } 6475 6476 /* if a reset is in progress, wait for some time for it to complete */ 6477 do { 6478 if (ice_is_reset_in_progress(pf->state)) { 6479 count++; 6480 usleep_range(1000, 2000); 6481 } else { 6482 break; 6483 } 6484 6485 } while (count < 100);
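/* each iteration above sleeps for 1-2 ms, so a count of 100 means a reset was still pending after roughly 100-200 ms of waiting */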
6486 6487 if (count == 100) { 6488 netdev_err(netdev, "can't change MTU. Device is busy\n"); 6489 return -EBUSY; 6490 } 6491 6492 event = kzalloc(sizeof(*event), GFP_KERNEL); 6493 if (!event) 6494 return -ENOMEM; 6495 6496 set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); 6497 ice_send_event_to_aux(pf, event); 6498 clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); 6499 6500 netdev->mtu = (unsigned int)new_mtu; 6501 6502 /* if VSI is up, bring it down and then back up */ 6503 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 6504 err = ice_down(vsi); 6505 if (err) { 6506 netdev_err(netdev, "change MTU if_down err %d\n", err); 6507 goto event_after; 6508 } 6509 6510 err = ice_up(vsi); 6511 if (err) { 6512 netdev_err(netdev, "change MTU if_up err %d\n", err); 6513 goto event_after; 6514 } 6515 } 6516 6517 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); 6518 event_after: 6519 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); 6520 ice_send_event_to_aux(pf, event); 6521 kfree(event); 6522 6523 return err; 6524 } 6525 6526 /** 6527 * ice_do_ioctl - Access the hwtstamp interface 6528 * @netdev: network interface device structure 6529 * @ifr: interface request data 6530 * @cmd: ioctl command 6531 */ 6532 static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6533 { 6534 struct ice_netdev_priv *np = netdev_priv(netdev); 6535 struct ice_pf *pf = np->vsi->back; 6536 6537 switch (cmd) { 6538 case SIOCGHWTSTAMP: 6539 return ice_ptp_get_ts_config(pf, ifr); 6540 case SIOCSHWTSTAMP: 6541 return ice_ptp_set_ts_config(pf, ifr); 6542 default: 6543 return -EOPNOTSUPP; 6544 } 6545 } 6546 6547 /** 6548 * ice_aq_str - convert AQ err code to a string 6549 * @aq_err: the AQ error code to convert 6550 */ 6551 const char *ice_aq_str(enum ice_aq_err aq_err) 6552 { 6553 switch (aq_err) { 6554 case ICE_AQ_RC_OK: 6555 return "OK"; 6556 case ICE_AQ_RC_EPERM: 6557 return "ICE_AQ_RC_EPERM"; 6558 case ICE_AQ_RC_ENOENT: 6559 return "ICE_AQ_RC_ENOENT"; 6560 case ICE_AQ_RC_ENOMEM: 6561 return "ICE_AQ_RC_ENOMEM"; 6562 case ICE_AQ_RC_EBUSY: 6563 return "ICE_AQ_RC_EBUSY"; 6564 case ICE_AQ_RC_EEXIST: 6565 return "ICE_AQ_RC_EEXIST"; 6566 case ICE_AQ_RC_EINVAL: 6567 return "ICE_AQ_RC_EINVAL"; 6568 case ICE_AQ_RC_ENOSPC: 6569 return "ICE_AQ_RC_ENOSPC"; 6570 case ICE_AQ_RC_ENOSYS: 6571 return "ICE_AQ_RC_ENOSYS"; 6572 case ICE_AQ_RC_EMODE: 6573 return "ICE_AQ_RC_EMODE"; 6574 case ICE_AQ_RC_ENOSEC: 6575 return "ICE_AQ_RC_ENOSEC"; 6576 case ICE_AQ_RC_EBADSIG: 6577 return "ICE_AQ_RC_EBADSIG"; 6578 case ICE_AQ_RC_ESVN: 6579 return "ICE_AQ_RC_ESVN"; 6580 case ICE_AQ_RC_EBADMAN: 6581 return "ICE_AQ_RC_EBADMAN"; 6582 case ICE_AQ_RC_EBADBUF: 6583 return "ICE_AQ_RC_EBADBUF"; 6584 } 6585 6586 return "ICE_AQ_RC_UNKNOWN"; 6587 } 6588 6589 /** 6590 * ice_stat_str - convert status err code to a string 6591 * @stat_err: the status error code to convert 6592 */ 6593 const char *ice_stat_str(enum ice_status stat_err) 6594 { 6595 switch (stat_err) { 6596 case ICE_SUCCESS: 6597 return "OK"; 6598 case ICE_ERR_PARAM: 6599 return "ICE_ERR_PARAM"; 6600 case ICE_ERR_NOT_IMPL: 6601 return "ICE_ERR_NOT_IMPL"; 6602 case ICE_ERR_NOT_READY: 6603 return "ICE_ERR_NOT_READY"; 6604 case ICE_ERR_NOT_SUPPORTED: 6605 return "ICE_ERR_NOT_SUPPORTED"; 6606 case ICE_ERR_BAD_PTR: 6607 return "ICE_ERR_BAD_PTR"; 6608 case ICE_ERR_INVAL_SIZE: 6609 return "ICE_ERR_INVAL_SIZE"; 6610 case ICE_ERR_DEVICE_NOT_SUPPORTED: 6611 return "ICE_ERR_DEVICE_NOT_SUPPORTED"; 6612 case ICE_ERR_RESET_FAILED: 6613 return "ICE_ERR_RESET_FAILED"; 6614 case 
ICE_ERR_FW_API_VER: 6615 return "ICE_ERR_FW_API_VER"; 6616 case ICE_ERR_NO_MEMORY: 6617 return "ICE_ERR_NO_MEMORY"; 6618 case ICE_ERR_CFG: 6619 return "ICE_ERR_CFG"; 6620 case ICE_ERR_OUT_OF_RANGE: 6621 return "ICE_ERR_OUT_OF_RANGE"; 6622 case ICE_ERR_ALREADY_EXISTS: 6623 return "ICE_ERR_ALREADY_EXISTS"; 6624 case ICE_ERR_NVM: 6625 return "ICE_ERR_NVM"; 6626 case ICE_ERR_NVM_CHECKSUM: 6627 return "ICE_ERR_NVM_CHECKSUM"; 6628 case ICE_ERR_BUF_TOO_SHORT: 6629 return "ICE_ERR_BUF_TOO_SHORT"; 6630 case ICE_ERR_NVM_BLANK_MODE: 6631 return "ICE_ERR_NVM_BLANK_MODE"; 6632 case ICE_ERR_IN_USE: 6633 return "ICE_ERR_IN_USE"; 6634 case ICE_ERR_MAX_LIMIT: 6635 return "ICE_ERR_MAX_LIMIT"; 6636 case ICE_ERR_RESET_ONGOING: 6637 return "ICE_ERR_RESET_ONGOING"; 6638 case ICE_ERR_HW_TABLE: 6639 return "ICE_ERR_HW_TABLE"; 6640 case ICE_ERR_DOES_NOT_EXIST: 6641 return "ICE_ERR_DOES_NOT_EXIST"; 6642 case ICE_ERR_FW_DDP_MISMATCH: 6643 return "ICE_ERR_FW_DDP_MISMATCH"; 6644 case ICE_ERR_AQ_ERROR: 6645 return "ICE_ERR_AQ_ERROR"; 6646 case ICE_ERR_AQ_TIMEOUT: 6647 return "ICE_ERR_AQ_TIMEOUT"; 6648 case ICE_ERR_AQ_FULL: 6649 return "ICE_ERR_AQ_FULL"; 6650 case ICE_ERR_AQ_NO_WORK: 6651 return "ICE_ERR_AQ_NO_WORK"; 6652 case ICE_ERR_AQ_EMPTY: 6653 return "ICE_ERR_AQ_EMPTY"; 6654 case ICE_ERR_AQ_FW_CRITICAL: 6655 return "ICE_ERR_AQ_FW_CRITICAL"; 6656 } 6657 6658 return "ICE_ERR_UNKNOWN"; 6659 } 6660 6661 /** 6662 * ice_set_rss_lut - Set RSS LUT 6663 * @vsi: Pointer to VSI structure 6664 * @lut: Lookup table 6665 * @lut_size: Lookup table size 6666 * 6667 * Returns 0 on success, negative on failure 6668 */ 6669 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 6670 { 6671 struct ice_aq_get_set_rss_lut_params params = {}; 6672 struct ice_hw *hw = &vsi->back->hw; 6673 enum ice_status status; 6674 6675 if (!lut) 6676 return -EINVAL; 6677 6678 params.vsi_handle = vsi->idx; 6679 params.lut_size = lut_size; 6680 params.lut_type = vsi->rss_lut_type; 6681 params.lut = lut; 6682 6683 status = ice_aq_set_rss_lut(hw, &params); 6684 if (status) { 6685 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n", 6686 ice_stat_str(status), 6687 ice_aq_str(hw->adminq.sq_last_status)); 6688 return -EIO; 6689 } 6690 6691 return 0; 6692 } 6693 6694 /** 6695 * ice_set_rss_key - Set RSS key 6696 * @vsi: Pointer to the VSI structure 6697 * @seed: RSS hash seed 6698 * 6699 * Returns 0 on success, negative on failure 6700 */ 6701 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) 6702 { 6703 struct ice_hw *hw = &vsi->back->hw; 6704 enum ice_status status; 6705 6706 if (!seed) 6707 return -EINVAL; 6708 6709 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 6710 if (status) { 6711 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n", 6712 ice_stat_str(status), 6713 ice_aq_str(hw->adminq.sq_last_status)); 6714 return -EIO; 6715 } 6716 6717 return 0; 6718 } 6719 6720 /** 6721 * ice_get_rss_lut - Get RSS LUT 6722 * @vsi: Pointer to VSI structure 6723 * @lut: Buffer to store the lookup table entries 6724 * @lut_size: Size of buffer to store the lookup table entries 6725 * 6726 * Returns 0 on success, negative on failure 6727 */ 6728 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 6729 { 6730 struct ice_aq_get_set_rss_lut_params params = {}; 6731 struct ice_hw *hw = &vsi->back->hw; 6732 enum ice_status status; 6733 6734 if (!lut) 6735 return -EINVAL; 6736 6737 params.vsi_handle = vsi->idx; 6738 params.lut_size = lut_size; 6739 params.lut_type =
vsi->rss_lut_type; 6740 params.lut = lut; 6741 6742 status = ice_aq_get_rss_lut(hw, &params); 6743 if (status) { 6744 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n", 6745 ice_stat_str(status), 6746 ice_aq_str(hw->adminq.sq_last_status)); 6747 return -EIO; 6748 } 6749 6750 return 0; 6751 } 6752 6753 /** 6754 * ice_get_rss_key - Get RSS key 6755 * @vsi: Pointer to VSI structure 6756 * @seed: Buffer to store the key in 6757 * 6758 * Returns 0 on success, negative on failure 6759 */ 6760 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) 6761 { 6762 struct ice_hw *hw = &vsi->back->hw; 6763 enum ice_status status; 6764 6765 if (!seed) 6766 return -EINVAL; 6767 6768 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 6769 if (status) { 6770 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n", 6771 ice_stat_str(status), 6772 ice_aq_str(hw->adminq.sq_last_status)); 6773 return -EIO; 6774 } 6775 6776 return 0; 6777 } 6778 6779 /** 6780 * ice_bridge_getlink - Get the hardware bridge mode 6781 * @skb: skb buff 6782 * @pid: process ID 6783 * @seq: RTNL message seq 6784 * @dev: the netdev being configured 6785 * @filter_mask: filter mask passed in 6786 * @nlflags: netlink flags passed in 6787 * 6788 * Return the bridge mode (VEB/VEPA) 6789 */ 6790 static int 6791 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 6792 struct net_device *dev, u32 filter_mask, int nlflags) 6793 { 6794 struct ice_netdev_priv *np = netdev_priv(dev); 6795 struct ice_vsi *vsi = np->vsi; 6796 struct ice_pf *pf = vsi->back; 6797 u16 bmode; 6798 6799 bmode = pf->first_sw->bridge_mode; 6800 6801 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 6802 filter_mask, NULL); 6803 } 6804 6805 /** 6806 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 6807 * @vsi: Pointer to VSI structure 6808 * @bmode: Hardware bridge mode (VEB/VEPA) 6809 * 6810 * Returns 0 on success, negative on failure 6811 */ 6812 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 6813 { 6814 struct ice_aqc_vsi_props *vsi_props; 6815 struct ice_hw *hw = &vsi->back->hw; 6816 struct ice_vsi_ctx *ctxt; 6817 enum ice_status status; 6818 int ret = 0; 6819 6820 vsi_props = &vsi->info; 6821 6822 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 6823 if (!ctxt) 6824 return -ENOMEM; 6825 6826 ctxt->info = vsi->info; 6827 6828 if (bmode == BRIDGE_MODE_VEB) 6829 /* change from VEPA to VEB mode */ 6830 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 6831 else 6832 /* change from VEB to VEPA mode */ 6833 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 6834 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 6835 6836 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 6837 if (status) { 6838 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", 6839 bmode, ice_stat_str(status), 6840 ice_aq_str(hw->adminq.sq_last_status)); 6841 ret = -EIO; 6842 goto out; 6843 } 6844 /* Update sw flags for bookkeeping */ 6845 vsi_props->sw_flags = ctxt->info.sw_flags; 6846 6847 out: 6848 kfree(ctxt); 6849 return ret; 6850 } 6851 6852 /** 6853 * ice_bridge_setlink - Set the hardware bridge mode 6854 * @dev: the netdev being configured 6855 * @nlh: RTNL message 6856 * @flags: bridge setlink flags 6857 * @extack: netlink extended ack 6858 * 6859 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
Iterates through the PF VSI list and sets the loopback mode (if 6861 * not already set for all VSIs connected to this switch. And also update the 6862 * unicast switch filter rules for the corresponding switch of the netdev. 6863 */ 6864 static int 6865 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 6866 u16 __always_unused flags, 6867 struct netlink_ext_ack __always_unused *extack) 6868 { 6869 struct ice_netdev_priv *np = netdev_priv(dev); 6870 struct ice_pf *pf = np->vsi->back; 6871 struct nlattr *attr, *br_spec; 6872 struct ice_hw *hw = &pf->hw; 6873 enum ice_status status; 6874 struct ice_sw *pf_sw; 6875 int rem, v, err = 0; 6876 6877 pf_sw = pf->first_sw; 6878 /* find the attribute in the netlink message */ 6879 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 6880 6881 nla_for_each_nested(attr, br_spec, rem) { 6882 __u16 mode; 6883 6884 if (nla_type(attr) != IFLA_BRIDGE_MODE) 6885 continue; 6886 mode = nla_get_u16(attr); 6887 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 6888 return -EINVAL; 6889 /* Continue if bridge mode is not being flipped */ 6890 if (mode == pf_sw->bridge_mode) 6891 continue; 6892 /* Iterates through the PF VSI list and update the loopback 6893 * mode of the VSI 6894 */ 6895 ice_for_each_vsi(pf, v) { 6896 if (!pf->vsi[v]) 6897 continue; 6898 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 6899 if (err) 6900 return err; 6901 } 6902 6903 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 6904 /* Update the unicast switch filter rules for the corresponding 6905 * switch of the netdev 6906 */ 6907 status = ice_update_sw_rule_bridge_mode(hw); 6908 if (status) { 6909 netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", 6910 mode, ice_stat_str(status), 6911 ice_aq_str(hw->adminq.sq_last_status)); 6912 /* revert hw->evb_veb */ 6913 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 6914 return -EIO; 6915 } 6916 6917 pf_sw->bridge_mode = mode; 6918 } 6919 6920 return 0; 6921 } 6922 6923 /** 6924 * ice_tx_timeout - Respond to a Tx Hang 6925 * @netdev: network interface device structure 6926 * @txqueue: Tx queue 6927 */ 6928 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) 6929 { 6930 struct ice_netdev_priv *np = netdev_priv(netdev); 6931 struct ice_ring *tx_ring = NULL; 6932 struct ice_vsi *vsi = np->vsi; 6933 struct ice_pf *pf = vsi->back; 6934 u32 i; 6935 6936 pf->tx_timeout_count++; 6937 6938 /* Check if PFC is enabled for the TC to which the queue belongs 6939 * to. If yes then Tx timeout is not caused by a hung queue, no 6940 * need to reset and rebuild 6941 */ 6942 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { 6943 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", 6944 txqueue); 6945 return; 6946 } 6947 6948 /* now that we have an index, find the tx_ring struct */ 6949 for (i = 0; i < vsi->num_txq; i++) 6950 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 6951 if (txqueue == vsi->tx_rings[i]->q_index) { 6952 tx_ring = vsi->tx_rings[i]; 6953 break; 6954 } 6955 6956 /* Reset recovery level if enough time has elapsed after last timeout. 6957 * Also ensure no new reset action happens before next timeout period. 
6958 */ 6959 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 6960 pf->tx_timeout_recovery_level = 1; 6961 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 6962 netdev->watchdog_timeo))) 6963 return; 6964 6965 if (tx_ring) { 6966 struct ice_hw *hw = &pf->hw; 6967 u32 head, val = 0; 6968 6969 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & 6970 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; 6971 /* Read interrupt register */ 6972 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); 6973 6974 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 6975 vsi->vsi_num, txqueue, tx_ring->next_to_clean, 6976 head, tx_ring->next_to_use, val); 6977 } 6978 6979 pf->tx_timeout_last_recovery = jiffies; 6980 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", 6981 pf->tx_timeout_recovery_level, txqueue); 6982 6983 switch (pf->tx_timeout_recovery_level) { 6984 case 1: 6985 set_bit(ICE_PFR_REQ, pf->state); 6986 break; 6987 case 2: 6988 set_bit(ICE_CORER_REQ, pf->state); 6989 break; 6990 case 3: 6991 set_bit(ICE_GLOBR_REQ, pf->state); 6992 break; 6993 default: 6994 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 6995 set_bit(ICE_DOWN, pf->state); 6996 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 6997 set_bit(ICE_SERVICE_DIS, pf->state); 6998 break; 6999 } 7000 7001 ice_service_task_schedule(pf); 7002 pf->tx_timeout_recovery_level++; 7003 } 7004 7005 /** 7006 * ice_open - Called when a network interface becomes active 7007 * @netdev: network interface device structure 7008 * 7009 * The open entry point is called when a network interface is made 7010 * active by the system (IFF_UP). At this point all resources needed 7011 * for transmit and receive operations are allocated, the interrupt 7012 * handler is registered with the OS, the netdev watchdog is enabled, 7013 * and the stack is notified that the interface is ready. 7014 * 7015 * Returns 0 on success, negative value on failure 7016 */ 7017 int ice_open(struct net_device *netdev) 7018 { 7019 struct ice_netdev_priv *np = netdev_priv(netdev); 7020 struct ice_pf *pf = np->vsi->back; 7021 7022 if (ice_is_reset_in_progress(pf->state)) { 7023 netdev_err(netdev, "can't open net device while reset is in progress"); 7024 return -EBUSY; 7025 } 7026 7027 return ice_open_internal(netdev); 7028 } 7029 7030 /** 7031 * ice_open_internal - Called when a network interface becomes active 7032 * @netdev: network interface device structure 7033 * 7034 * Internal ice_open implementation. 
Should not be used directly except by ice_open and the reset 7035 * handling routines 7036 * 7037 * Returns 0 on success, negative value on failure 7038 */ 7039 int ice_open_internal(struct net_device *netdev) 7040 { 7041 struct ice_netdev_priv *np = netdev_priv(netdev); 7042 struct ice_vsi *vsi = np->vsi; 7043 struct ice_pf *pf = vsi->back; 7044 struct ice_port_info *pi; 7045 enum ice_status status; 7046 int err; 7047 7048 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { 7049 netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 7050 return -EIO; 7051 } 7052 7053 netif_carrier_off(netdev); 7054 7055 pi = vsi->port_info; 7056 status = ice_update_link_info(pi); 7057 if (status) { 7058 netdev_err(netdev, "Failed to get link info, error %s\n", 7059 ice_stat_str(status)); 7060 return -EIO; 7061 } 7062 7063 ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); 7064 7065 /* Set PHY if there is media, otherwise, turn off PHY */ 7066 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 7067 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 7068 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { 7069 err = ice_init_phy_user_cfg(pi); 7070 if (err) { 7071 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", 7072 err); 7073 return err; 7074 } 7075 } 7076 7077 err = ice_configure_phy(vsi); 7078 if (err) { 7079 netdev_err(netdev, "Failed to set physical link up, error %d\n", 7080 err); 7081 return err; 7082 } 7083 } else { 7084 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 7085 ice_set_link(vsi, false); 7086 } 7087 7088 err = ice_vsi_open(vsi); 7089 if (err) 7090 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 7091 vsi->vsi_num, vsi->vsw->sw_id); 7092 7093 /* Update existing tunnels information */ 7094 udp_tunnel_get_rx_info(netdev); 7095 7096 return err; 7097 } 7098 7099 /** 7100 * ice_stop - Disables a network interface 7101 * @netdev: network interface device structure 7102 * 7103 * The stop entry point is called when an interface is de-activated by the OS, 7104 * and the netdevice enters the DOWN state. The hardware is still under the 7105 * driver's control, but the netdev interface is disabled. 7106 * 7107 * Returns success only - not allowed to fail 7108 */ 7109 int ice_stop(struct net_device *netdev) 7110 { 7111 struct ice_netdev_priv *np = netdev_priv(netdev); 7112 struct ice_vsi *vsi = np->vsi; 7113 struct ice_pf *pf = vsi->back; 7114 7115 if (ice_is_reset_in_progress(pf->state)) { 7116 netdev_err(netdev, "can't stop net device while reset is in progress"); 7117 return -EBUSY; 7118 } 7119 7120 ice_vsi_close(vsi); 7121 7122 return 0; 7123 } 7124 7125 /** 7126 * ice_features_check - Validate encapsulated packet conforms to limits 7127 * @skb: skb buffer 7128 * @netdev: This port's netdev 7129 * @features: Offload features that the stack believes apply 7130 */ 7131 static netdev_features_t 7132 ice_features_check(struct sk_buff *skb, 7133 struct net_device __always_unused *netdev, 7134 netdev_features_t features) 7135 { 7136 size_t len; 7137 7138 /* No point in doing any of this if neither checksum nor GSO are 7139 * being requested for this frame. We can rule out both by just 7140 * checking for CHECKSUM_PARTIAL 7141 */ 7142 if (skb->ip_summed != CHECKSUM_PARTIAL) 7143 return features; 7144 7145 /* We cannot support GSO if the MSS is going to be less than 7146 * 64 bytes. If it is then we need to drop support for GSO.
7147 */ 7148 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 7149 features &= ~NETIF_F_GSO_MASK; 7150 7151 len = skb_network_header(skb) - skb->data; 7152 if (len > ICE_TXD_MACLEN_MAX || len & 0x1) 7153 goto out_rm_features; 7154 7155 len = skb_transport_header(skb) - skb_network_header(skb); 7156 if (len > ICE_TXD_IPLEN_MAX || len & 0x1) 7157 goto out_rm_features; 7158 7159 if (skb->encapsulation) { 7160 len = skb_inner_network_header(skb) - skb_transport_header(skb); 7161 if (len > ICE_TXD_L4LEN_MAX || len & 0x1) 7162 goto out_rm_features; 7163 7164 len = skb_inner_transport_header(skb) - 7165 skb_inner_network_header(skb); 7166 if (len > ICE_TXD_IPLEN_MAX || len & 0x1) 7167 goto out_rm_features; 7168 } 7169 7170 return features; 7171 out_rm_features: 7172 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 7173 } 7174 7175 static const struct net_device_ops ice_netdev_safe_mode_ops = { 7176 .ndo_open = ice_open, 7177 .ndo_stop = ice_stop, 7178 .ndo_start_xmit = ice_start_xmit, 7179 .ndo_set_mac_address = ice_set_mac_address, 7180 .ndo_validate_addr = eth_validate_addr, 7181 .ndo_change_mtu = ice_change_mtu, 7182 .ndo_get_stats64 = ice_get_stats64, 7183 .ndo_tx_timeout = ice_tx_timeout, 7184 }; 7185 7186 static const struct net_device_ops ice_netdev_ops = { 7187 .ndo_open = ice_open, 7188 .ndo_stop = ice_stop, 7189 .ndo_start_xmit = ice_start_xmit, 7190 .ndo_features_check = ice_features_check, 7191 .ndo_set_rx_mode = ice_set_rx_mode, 7192 .ndo_set_mac_address = ice_set_mac_address, 7193 .ndo_validate_addr = eth_validate_addr, 7194 .ndo_change_mtu = ice_change_mtu, 7195 .ndo_get_stats64 = ice_get_stats64, 7196 .ndo_set_tx_maxrate = ice_set_tx_maxrate, 7197 .ndo_do_ioctl = ice_do_ioctl, 7198 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 7199 .ndo_set_vf_mac = ice_set_vf_mac, 7200 .ndo_get_vf_config = ice_get_vf_cfg, 7201 .ndo_set_vf_trust = ice_set_vf_trust, 7202 .ndo_set_vf_vlan = ice_set_vf_port_vlan, 7203 .ndo_set_vf_link_state = ice_set_vf_link_state, 7204 .ndo_get_vf_stats = ice_get_vf_stats, 7205 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 7206 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 7207 .ndo_set_features = ice_set_features, 7208 .ndo_bridge_getlink = ice_bridge_getlink, 7209 .ndo_bridge_setlink = ice_bridge_setlink, 7210 .ndo_fdb_add = ice_fdb_add, 7211 .ndo_fdb_del = ice_fdb_del, 7212 #ifdef CONFIG_RFS_ACCEL 7213 .ndo_rx_flow_steer = ice_rx_flow_steer, 7214 #endif 7215 .ndo_tx_timeout = ice_tx_timeout, 7216 .ndo_bpf = ice_xdp, 7217 .ndo_xdp_xmit = ice_xdp_xmit, 7218 .ndo_xsk_wakeup = ice_xsk_wakeup, 7219 }; 7220