// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"
#include "ice_lib.h"

#define DRV_VERSION	"0.7.2-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);

static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u32 ice_get_tx_pending(struct ice_ring *ring)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	unsigned int i;
	u32 v, v_idx;
	int packets;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			int itr = ICE_ITR_NONE;

			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				v_idx = tx_ring->q_vector->v_idx;
				wr32(&vsi->back->hw,
				     GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
				     (itr << GLINT_DYN_CTL_ITR_INDX_S) |
				     GLINT_DYN_CTL_SWINT_TRIG_M |
				     GLINT_DYN_CTL_INTENA_MSK_M);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
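
/* Illustration of the scheme above (numbers are made up): if a Tx ring had
 * descriptors outstanding on the previous service-task pass and reported,
 * say, 100 completed packets, prev_pkt was set to 100.  If the counter
 * still reads 100 on this pass, the queue is treated as stalled and a
 * software interrupt is fired through GLINT_DYN_CTL to revive the
 * q_vector.  prev_pkt is parked at -1 whenever no work is pending, so an
 * idle queue never looks like a hung one.
 */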

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated.
release lock */ 211 netif_addr_unlock_bh(netdev); 212 } 213 214 /* Remove mac addresses in the unsync list */ 215 status = ice_remove_mac(hw, &vsi->tmp_unsync_list); 216 ice_free_fltr_list(dev, &vsi->tmp_unsync_list); 217 if (status) { 218 netdev_err(netdev, "Failed to delete MAC filters\n"); 219 /* if we failed because of alloc failures, just bail */ 220 if (status == ICE_ERR_NO_MEMORY) { 221 err = -ENOMEM; 222 goto out; 223 } 224 } 225 226 /* Add mac addresses in the sync list */ 227 status = ice_add_mac(hw, &vsi->tmp_sync_list); 228 ice_free_fltr_list(dev, &vsi->tmp_sync_list); 229 if (status) { 230 netdev_err(netdev, "Failed to add MAC filters\n"); 231 /* If there is no more space for new umac filters, vsi 232 * should go into promiscuous mode. There should be some 233 * space reserved for promiscuous filters. 234 */ 235 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && 236 !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC, 237 vsi->state)) { 238 promisc_forced_on = true; 239 netdev_warn(netdev, 240 "Reached MAC filter limit, forcing promisc mode on VSI %d\n", 241 vsi->vsi_num); 242 } else { 243 err = -EIO; 244 goto out; 245 } 246 } 247 /* check for changes in promiscuous modes */ 248 if (changed_flags & IFF_ALLMULTI) 249 netdev_warn(netdev, "Unsupported configuration\n"); 250 251 if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || 252 test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) { 253 clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); 254 if (vsi->current_netdev_flags & IFF_PROMISC) { 255 /* Apply TX filter rule to get traffic from VMs */ 256 status = ice_cfg_dflt_vsi(hw, vsi->idx, true, 257 ICE_FLTR_TX); 258 if (status) { 259 netdev_err(netdev, "Error setting default VSI %i tx rule\n", 260 vsi->vsi_num); 261 vsi->current_netdev_flags &= ~IFF_PROMISC; 262 err = -EIO; 263 goto out_promisc; 264 } 265 /* Apply RX filter rule to get traffic from wire */ 266 status = ice_cfg_dflt_vsi(hw, vsi->idx, true, 267 ICE_FLTR_RX); 268 if (status) { 269 netdev_err(netdev, "Error setting default VSI %i rx rule\n", 270 vsi->vsi_num); 271 vsi->current_netdev_flags &= ~IFF_PROMISC; 272 err = -EIO; 273 goto out_promisc; 274 } 275 } else { 276 /* Clear TX filter rule to stop traffic from VMs */ 277 status = ice_cfg_dflt_vsi(hw, vsi->idx, false, 278 ICE_FLTR_TX); 279 if (status) { 280 netdev_err(netdev, "Error clearing default VSI %i tx rule\n", 281 vsi->vsi_num); 282 vsi->current_netdev_flags |= IFF_PROMISC; 283 err = -EIO; 284 goto out_promisc; 285 } 286 /* Clear RX filter to remove traffic from wire */ 287 status = ice_cfg_dflt_vsi(hw, vsi->idx, false, 288 ICE_FLTR_RX); 289 if (status) { 290 netdev_err(netdev, "Error clearing default VSI %i rx rule\n", 291 vsi->vsi_num); 292 vsi->current_netdev_flags |= IFF_PROMISC; 293 err = -EIO; 294 goto out_promisc; 295 } 296 } 297 } 298 goto exit; 299 300 out_promisc: 301 set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); 302 goto exit; 303 out: 304 /* if something went wrong then set the changed flag so we try again */ 305 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 306 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 307 exit: 308 clear_bit(__ICE_CFG_BUSY, vsi->state); 309 return err; 310 } 311 312 /** 313 * ice_sync_fltr_subtask - Sync the VSI filter list with HW 314 * @pf: board private structure 315 */ 316 static void ice_sync_fltr_subtask(struct ice_pf *pf) 317 { 318 int v; 319 320 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) 321 return; 322 323 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 324 325 for (v = 0; v < 
pf->num_alloc_vsi; v++) 326 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && 327 ice_vsi_sync_fltr(pf->vsi[v])) { 328 /* come back and try again later */ 329 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 330 break; 331 } 332 } 333 334 /** 335 * ice_prepare_for_reset - prep for the core to reset 336 * @pf: board private structure 337 * 338 * Inform or close all dependent features in prep for reset. 339 */ 340 static void 341 ice_prepare_for_reset(struct ice_pf *pf) 342 { 343 struct ice_hw *hw = &pf->hw; 344 345 /* Notify VFs of impending reset */ 346 if (ice_check_sq_alive(hw, &hw->mailboxq)) 347 ice_vc_notify_reset(pf); 348 349 /* disable the VSIs and their queues that are not already DOWN */ 350 ice_pf_dis_all_vsi(pf); 351 352 if (hw->port_info) 353 ice_sched_clear_port(hw->port_info); 354 355 ice_shutdown_all_ctrlq(hw); 356 357 set_bit(__ICE_PREPARED_FOR_RESET, pf->state); 358 } 359 360 /** 361 * ice_do_reset - Initiate one of many types of resets 362 * @pf: board private structure 363 * @reset_type: reset type requested 364 * before this function was called. 365 */ 366 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 367 { 368 struct device *dev = &pf->pdev->dev; 369 struct ice_hw *hw = &pf->hw; 370 371 dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); 372 WARN_ON(in_interrupt()); 373 374 ice_prepare_for_reset(pf); 375 376 /* trigger the reset */ 377 if (ice_reset(hw, reset_type)) { 378 dev_err(dev, "reset %d failed\n", reset_type); 379 set_bit(__ICE_RESET_FAILED, pf->state); 380 clear_bit(__ICE_RESET_OICR_RECV, pf->state); 381 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); 382 clear_bit(__ICE_PFR_REQ, pf->state); 383 clear_bit(__ICE_CORER_REQ, pf->state); 384 clear_bit(__ICE_GLOBR_REQ, pf->state); 385 return; 386 } 387 388 /* PFR is a bit of a special case because it doesn't result in an OICR 389 * interrupt. So for PFR, rebuild after the reset and clear the reset- 390 * associated state bits. 391 */ 392 if (reset_type == ICE_RESET_PFR) { 393 pf->pfr_count++; 394 ice_rebuild(pf); 395 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); 396 clear_bit(__ICE_PFR_REQ, pf->state); 397 } 398 } 399 400 /** 401 * ice_reset_subtask - Set up for resetting the device and driver 402 * @pf: board private structure 403 */ 404 static void ice_reset_subtask(struct ice_pf *pf) 405 { 406 enum ice_reset_req reset_type = ICE_RESET_INVAL; 407 408 /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an 409 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type 410 * of reset is pending and sets bits in pf->state indicating the reset 411 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set 412 * prepare for pending reset if not already (for PF software-initiated 413 * global resets the software should already be prepared for it as 414 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated 415 * by firmware or software on other PFs, that bit is not set so prepare 416 * for the reset now), poll for reset done, rebuild and return. 417 */ 418 if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { 419 clear_bit(__ICE_GLOBR_RECV, pf->state); 420 clear_bit(__ICE_CORER_RECV, pf->state); 421 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) 422 ice_prepare_for_reset(pf); 423 424 /* make sure we are ready to rebuild */ 425 if (ice_check_reset(&pf->hw)) { 426 set_bit(__ICE_RESET_FAILED, pf->state); 427 } else { 428 /* done with reset. 
start rebuild */ 429 pf->hw.reset_ongoing = false; 430 ice_rebuild(pf); 431 /* clear bit to resume normal operations, but 432 * ICE_NEEDS_RESTART bit is set incase rebuild failed 433 */ 434 clear_bit(__ICE_RESET_OICR_RECV, pf->state); 435 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); 436 clear_bit(__ICE_PFR_REQ, pf->state); 437 clear_bit(__ICE_CORER_REQ, pf->state); 438 clear_bit(__ICE_GLOBR_REQ, pf->state); 439 } 440 441 return; 442 } 443 444 /* No pending resets to finish processing. Check for new resets */ 445 if (test_bit(__ICE_PFR_REQ, pf->state)) 446 reset_type = ICE_RESET_PFR; 447 if (test_bit(__ICE_CORER_REQ, pf->state)) 448 reset_type = ICE_RESET_CORER; 449 if (test_bit(__ICE_GLOBR_REQ, pf->state)) 450 reset_type = ICE_RESET_GLOBR; 451 /* If no valid reset type requested just return */ 452 if (reset_type == ICE_RESET_INVAL) 453 return; 454 455 /* reset if not already down or busy */ 456 if (!test_bit(__ICE_DOWN, pf->state) && 457 !test_bit(__ICE_CFG_BUSY, pf->state)) { 458 ice_do_reset(pf, reset_type); 459 } 460 } 461 462 /** 463 * ice_print_link_msg - print link up or down message 464 * @vsi: the VSI whose link status is being queried 465 * @isup: boolean for if the link is now up or down 466 */ 467 void ice_print_link_msg(struct ice_vsi *vsi, bool isup) 468 { 469 const char *speed; 470 const char *fc; 471 472 if (vsi->current_isup == isup) 473 return; 474 475 vsi->current_isup = isup; 476 477 if (!isup) { 478 netdev_info(vsi->netdev, "NIC Link is Down\n"); 479 return; 480 } 481 482 switch (vsi->port_info->phy.link_info.link_speed) { 483 case ICE_AQ_LINK_SPEED_40GB: 484 speed = "40 G"; 485 break; 486 case ICE_AQ_LINK_SPEED_25GB: 487 speed = "25 G"; 488 break; 489 case ICE_AQ_LINK_SPEED_20GB: 490 speed = "20 G"; 491 break; 492 case ICE_AQ_LINK_SPEED_10GB: 493 speed = "10 G"; 494 break; 495 case ICE_AQ_LINK_SPEED_5GB: 496 speed = "5 G"; 497 break; 498 case ICE_AQ_LINK_SPEED_2500MB: 499 speed = "2.5 G"; 500 break; 501 case ICE_AQ_LINK_SPEED_1000MB: 502 speed = "1 G"; 503 break; 504 case ICE_AQ_LINK_SPEED_100MB: 505 speed = "100 M"; 506 break; 507 default: 508 speed = "Unknown"; 509 break; 510 } 511 512 switch (vsi->port_info->fc.current_mode) { 513 case ICE_FC_FULL: 514 fc = "RX/TX"; 515 break; 516 case ICE_FC_TX_PAUSE: 517 fc = "TX"; 518 break; 519 case ICE_FC_RX_PAUSE: 520 fc = "RX"; 521 break; 522 default: 523 fc = "Unknown"; 524 break; 525 } 526 527 netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n", 528 speed, fc); 529 } 530 531 /** 532 * ice_vsi_link_event - update the vsi's netdev 533 * @vsi: the vsi on which the link event occurred 534 * @link_up: whether or not the vsi needs to be set up or down 535 */ 536 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) 537 { 538 if (!vsi || test_bit(__ICE_DOWN, vsi->state)) 539 return; 540 541 if (vsi->type == ICE_VSI_PF) { 542 if (!vsi->netdev) { 543 dev_dbg(&vsi->back->pdev->dev, 544 "vsi->netdev is not initialized!\n"); 545 return; 546 } 547 if (link_up) { 548 netif_carrier_on(vsi->netdev); 549 netif_tx_wake_all_queues(vsi->netdev); 550 } else { 551 netif_carrier_off(vsi->netdev); 552 netif_tx_stop_all_queues(vsi->netdev); 553 } 554 } 555 } 556 557 /** 558 * ice_link_event - process the link event 559 * @pf: pf that the link event is associated with 560 * @pi: port_info for the port that the link event is associated with 561 * 562 * Returns -EIO if ice_get_link_status() fails 563 * Returns 0 on success 564 */ 565 static int 566 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi) 567 { 568 u8 
new_link_speed, old_link_speed; 569 struct ice_phy_info *phy_info; 570 bool new_link_same_as_old; 571 bool new_link, old_link; 572 u8 lport; 573 u16 v; 574 575 phy_info = &pi->phy; 576 phy_info->link_info_old = phy_info->link_info; 577 /* Force ice_get_link_status() to update link info */ 578 phy_info->get_link_info = true; 579 580 old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); 581 old_link_speed = phy_info->link_info_old.link_speed; 582 583 lport = pi->lport; 584 if (ice_get_link_status(pi, &new_link)) { 585 dev_dbg(&pf->pdev->dev, 586 "Could not get link status for port %d\n", lport); 587 return -EIO; 588 } 589 590 new_link_speed = phy_info->link_info.link_speed; 591 592 new_link_same_as_old = (new_link == old_link && 593 new_link_speed == old_link_speed); 594 595 ice_for_each_vsi(pf, v) { 596 struct ice_vsi *vsi = pf->vsi[v]; 597 598 if (!vsi || !vsi->port_info) 599 continue; 600 601 if (new_link_same_as_old && 602 (test_bit(__ICE_DOWN, vsi->state) || 603 new_link == netif_carrier_ok(vsi->netdev))) 604 continue; 605 606 if (vsi->port_info->lport == lport) { 607 ice_print_link_msg(vsi, new_link); 608 ice_vsi_link_event(vsi, new_link); 609 } 610 } 611 612 ice_vc_notify_link_state(pf); 613 614 return 0; 615 } 616 617 /** 618 * ice_watchdog_subtask - periodic tasks not using event driven scheduling 619 * @pf: board private structure 620 */ 621 static void ice_watchdog_subtask(struct ice_pf *pf) 622 { 623 int i; 624 625 /* if interface is down do nothing */ 626 if (test_bit(__ICE_DOWN, pf->state) || 627 test_bit(__ICE_CFG_BUSY, pf->state)) 628 return; 629 630 /* make sure we don't do these things too often */ 631 if (time_before(jiffies, 632 pf->serv_tmr_prev + pf->serv_tmr_period)) 633 return; 634 635 pf->serv_tmr_prev = jiffies; 636 637 if (ice_link_event(pf, pf->hw.port_info)) 638 dev_dbg(&pf->pdev->dev, "ice_link_event failed\n"); 639 640 /* Update the stats for active netdevs so the network stack 641 * can look at updated numbers whenever it cares to 642 */ 643 ice_update_pf_stats(pf); 644 for (i = 0; i < pf->num_alloc_vsi; i++) 645 if (pf->vsi[i] && pf->vsi[i]->netdev) 646 ice_update_vsi_stats(pf->vsi[i]); 647 } 648 649 /** 650 * __ice_clean_ctrlq - helper function to clean controlq rings 651 * @pf: ptr to struct ice_pf 652 * @q_type: specific Control queue type 653 */ 654 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) 655 { 656 struct ice_rq_event_info event; 657 struct ice_hw *hw = &pf->hw; 658 struct ice_ctl_q_info *cq; 659 u16 pending, i = 0; 660 const char *qtype; 661 u32 oldval, val; 662 663 /* Do not clean control queue if/when PF reset fails */ 664 if (test_bit(__ICE_RESET_FAILED, pf->state)) 665 return 0; 666 667 switch (q_type) { 668 case ICE_CTL_Q_ADMIN: 669 cq = &hw->adminq; 670 qtype = "Admin"; 671 break; 672 case ICE_CTL_Q_MAILBOX: 673 cq = &hw->mailboxq; 674 qtype = "Mailbox"; 675 break; 676 default: 677 dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", 678 q_type); 679 return 0; 680 } 681 682 /* check for error indications - PF_xx_AxQLEN register layout for 683 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN. 
684 */ 685 val = rd32(hw, cq->rq.len); 686 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 687 PF_FW_ARQLEN_ARQCRIT_M)) { 688 oldval = val; 689 if (val & PF_FW_ARQLEN_ARQVFE_M) 690 dev_dbg(&pf->pdev->dev, 691 "%s Receive Queue VF Error detected\n", qtype); 692 if (val & PF_FW_ARQLEN_ARQOVFL_M) { 693 dev_dbg(&pf->pdev->dev, 694 "%s Receive Queue Overflow Error detected\n", 695 qtype); 696 } 697 if (val & PF_FW_ARQLEN_ARQCRIT_M) 698 dev_dbg(&pf->pdev->dev, 699 "%s Receive Queue Critical Error detected\n", 700 qtype); 701 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 702 PF_FW_ARQLEN_ARQCRIT_M); 703 if (oldval != val) 704 wr32(hw, cq->rq.len, val); 705 } 706 707 val = rd32(hw, cq->sq.len); 708 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 709 PF_FW_ATQLEN_ATQCRIT_M)) { 710 oldval = val; 711 if (val & PF_FW_ATQLEN_ATQVFE_M) 712 dev_dbg(&pf->pdev->dev, 713 "%s Send Queue VF Error detected\n", qtype); 714 if (val & PF_FW_ATQLEN_ATQOVFL_M) { 715 dev_dbg(&pf->pdev->dev, 716 "%s Send Queue Overflow Error detected\n", 717 qtype); 718 } 719 if (val & PF_FW_ATQLEN_ATQCRIT_M) 720 dev_dbg(&pf->pdev->dev, 721 "%s Send Queue Critical Error detected\n", 722 qtype); 723 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 724 PF_FW_ATQLEN_ATQCRIT_M); 725 if (oldval != val) 726 wr32(hw, cq->sq.len, val); 727 } 728 729 event.buf_len = cq->rq_buf_size; 730 event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len, 731 GFP_KERNEL); 732 if (!event.msg_buf) 733 return 0; 734 735 do { 736 enum ice_status ret; 737 u16 opcode; 738 739 ret = ice_clean_rq_elem(hw, cq, &event, &pending); 740 if (ret == ICE_ERR_AQ_NO_WORK) 741 break; 742 if (ret) { 743 dev_err(&pf->pdev->dev, 744 "%s Receive Queue event error %d\n", qtype, 745 ret); 746 break; 747 } 748 749 opcode = le16_to_cpu(event.desc.opcode); 750 751 switch (opcode) { 752 case ice_mbx_opc_send_msg_to_pf: 753 ice_vc_process_vf_msg(pf, &event); 754 break; 755 case ice_aqc_opc_fw_logging: 756 ice_output_fw_log(hw, &event.desc, event.msg_buf); 757 break; 758 default: 759 dev_dbg(&pf->pdev->dev, 760 "%s Receive Queue unknown event 0x%04x ignored\n", 761 qtype, opcode); 762 break; 763 } 764 } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); 765 766 devm_kfree(&pf->pdev->dev, event.msg_buf); 767 768 return pending && (i == ICE_DFLT_IRQ_WORK); 769 } 770 771 /** 772 * ice_ctrlq_pending - check if there is a difference between ntc and ntu 773 * @hw: pointer to hardware info 774 * @cq: control queue information 775 * 776 * returns true if there are pending messages in a queue, false if there aren't 777 */ 778 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) 779 { 780 u16 ntu; 781 782 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); 783 return cq->rq.next_to_clean != ntu; 784 } 785 786 /** 787 * ice_clean_adminq_subtask - clean the AdminQ rings 788 * @pf: board private structure 789 */ 790 static void ice_clean_adminq_subtask(struct ice_pf *pf) 791 { 792 struct ice_hw *hw = &pf->hw; 793 794 if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) 795 return; 796 797 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) 798 return; 799 800 clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); 801 802 /* There might be a situation where new messages arrive to a control 803 * queue between processing the last message and clearing the 804 * EVENT_PENDING bit. So before exiting, check queue head again (using 805 * ice_ctrlq_pending) and process new messages if any. 
806 */ 807 if (ice_ctrlq_pending(hw, &hw->adminq)) 808 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); 809 810 ice_flush(hw); 811 } 812 813 /** 814 * ice_clean_mailboxq_subtask - clean the MailboxQ rings 815 * @pf: board private structure 816 */ 817 static void ice_clean_mailboxq_subtask(struct ice_pf *pf) 818 { 819 struct ice_hw *hw = &pf->hw; 820 821 if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state)) 822 return; 823 824 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) 825 return; 826 827 clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); 828 829 if (ice_ctrlq_pending(hw, &hw->mailboxq)) 830 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); 831 832 ice_flush(hw); 833 } 834 835 /** 836 * ice_service_task_schedule - schedule the service task to wake up 837 * @pf: board private structure 838 * 839 * If not already scheduled, this puts the task into the work queue. 840 */ 841 static void ice_service_task_schedule(struct ice_pf *pf) 842 { 843 if (!test_bit(__ICE_SERVICE_DIS, pf->state) && 844 !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && 845 !test_bit(__ICE_NEEDS_RESTART, pf->state)) 846 queue_work(ice_wq, &pf->serv_task); 847 } 848 849 /** 850 * ice_service_task_complete - finish up the service task 851 * @pf: board private structure 852 */ 853 static void ice_service_task_complete(struct ice_pf *pf) 854 { 855 WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state)); 856 857 /* force memory (pf->state) to sync before next service task */ 858 smp_mb__before_atomic(); 859 clear_bit(__ICE_SERVICE_SCHED, pf->state); 860 } 861 862 /** 863 * ice_service_task_stop - stop service task and cancel works 864 * @pf: board private structure 865 */ 866 static void ice_service_task_stop(struct ice_pf *pf) 867 { 868 set_bit(__ICE_SERVICE_DIS, pf->state); 869 870 if (pf->serv_tmr.function) 871 del_timer_sync(&pf->serv_tmr); 872 if (pf->serv_task.func) 873 cancel_work_sync(&pf->serv_task); 874 875 clear_bit(__ICE_SERVICE_SCHED, pf->state); 876 } 877 878 /** 879 * ice_service_timer - timer callback to schedule service task 880 * @t: pointer to timer_list 881 */ 882 static void ice_service_timer(struct timer_list *t) 883 { 884 struct ice_pf *pf = from_timer(pf, t, serv_tmr); 885 886 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); 887 ice_service_task_schedule(pf); 888 } 889 890 /** 891 * ice_handle_mdd_event - handle malicious driver detect event 892 * @pf: pointer to the PF structure 893 * 894 * Called from service task. 
OICR interrupt handler indicates MDD event 895 */ 896 static void ice_handle_mdd_event(struct ice_pf *pf) 897 { 898 struct ice_hw *hw = &pf->hw; 899 bool mdd_detected = false; 900 u32 reg; 901 int i; 902 903 if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) 904 return; 905 906 /* find what triggered the MDD event */ 907 reg = rd32(hw, GL_MDET_TX_PQM); 908 if (reg & GL_MDET_TX_PQM_VALID_M) { 909 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> 910 GL_MDET_TX_PQM_PF_NUM_S; 911 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> 912 GL_MDET_TX_PQM_VF_NUM_S; 913 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> 914 GL_MDET_TX_PQM_MAL_TYPE_S; 915 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> 916 GL_MDET_TX_PQM_QNUM_S); 917 918 if (netif_msg_tx_err(pf)) 919 dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", 920 event, queue, pf_num, vf_num); 921 wr32(hw, GL_MDET_TX_PQM, 0xffffffff); 922 mdd_detected = true; 923 } 924 925 reg = rd32(hw, GL_MDET_TX_TCLAN); 926 if (reg & GL_MDET_TX_TCLAN_VALID_M) { 927 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> 928 GL_MDET_TX_TCLAN_PF_NUM_S; 929 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> 930 GL_MDET_TX_TCLAN_VF_NUM_S; 931 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> 932 GL_MDET_TX_TCLAN_MAL_TYPE_S; 933 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> 934 GL_MDET_TX_TCLAN_QNUM_S); 935 936 if (netif_msg_rx_err(pf)) 937 dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", 938 event, queue, pf_num, vf_num); 939 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); 940 mdd_detected = true; 941 } 942 943 reg = rd32(hw, GL_MDET_RX); 944 if (reg & GL_MDET_RX_VALID_M) { 945 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> 946 GL_MDET_RX_PF_NUM_S; 947 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> 948 GL_MDET_RX_VF_NUM_S; 949 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> 950 GL_MDET_RX_MAL_TYPE_S; 951 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> 952 GL_MDET_RX_QNUM_S); 953 954 if (netif_msg_rx_err(pf)) 955 dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", 956 event, queue, pf_num, vf_num); 957 wr32(hw, GL_MDET_RX, 0xffffffff); 958 mdd_detected = true; 959 } 960 961 if (mdd_detected) { 962 bool pf_mdd_detected = false; 963 964 reg = rd32(hw, PF_MDET_TX_PQM); 965 if (reg & PF_MDET_TX_PQM_VALID_M) { 966 wr32(hw, PF_MDET_TX_PQM, 0xFFFF); 967 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 968 pf_mdd_detected = true; 969 } 970 971 reg = rd32(hw, PF_MDET_TX_TCLAN); 972 if (reg & PF_MDET_TX_TCLAN_VALID_M) { 973 wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); 974 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 975 pf_mdd_detected = true; 976 } 977 978 reg = rd32(hw, PF_MDET_RX); 979 if (reg & PF_MDET_RX_VALID_M) { 980 wr32(hw, PF_MDET_RX, 0xFFFF); 981 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 982 pf_mdd_detected = true; 983 } 984 /* Queue belongs to the PF initiate a reset */ 985 if (pf_mdd_detected) { 986 set_bit(__ICE_NEEDS_RESTART, pf->state); 987 ice_service_task_schedule(pf); 988 } 989 } 990 991 /* see if one of the VFs needs to be reset */ 992 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 993 struct ice_vf *vf = &pf->vf[i]; 994 995 reg = rd32(hw, VP_MDET_TX_PQM(i)); 996 if (reg & VP_MDET_TX_PQM_VALID_M) { 997 wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); 998 vf->num_mdd_events++; 999 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 1000 i); 1001 } 1002 1003 reg = rd32(hw, 
			VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
		}
	}

	/* re-enable MDD interrupt cause */
	clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, PFINT_OICR_ENA);
	reg |= PFINT_OICR_MAL_DETECT_M;
	wr32(hw, PFINT_OICR_ENA, reg);
	ice_flush(hw);
}
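
/* The service task below runs its subtasks in a fixed order: any pending
 * reset is handled first, and the remaining subtasks are skipped while a
 * reset/rebuild cycle is in progress or the rebuild has failed, since the
 * queues, vectors and control queues they operate on may be mid-teardown.
 */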

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_process_vflr_event(pf);
	ice_watchdog_subtask(pf);
	ice_clean_adminq_subtask(pf);
	ice_clean_mailboxq_subtask(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}
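
/* If a pass of the subtasks above overruns the service timer period, or one
 * of the *_EVENT_PENDING bits is set again while the task is running, the
 * timer is pulled in to "now" so ice_service_timer() reschedules the work
 * immediately instead of waiting out a full period.
 */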

/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the hw instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		for (i = 0; i < vsi->num_q_vectors; i++)
			ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
	}

	ice_flush(hw);
	return 0;
}

/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->sw_base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(&pf->pdev->dev,
				       pf->msix_entries[base + vector].vector,
				       vsi->irq_handler, 0, q_vector->name,
				       q_vector);
		if (err) {
			netdev_err(vsi->netdev,
				   "MSIX request_irq failed, error: %d\n", err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = ice_irq_affinity_notify;
		q_vector->affinity_notify.release = ice_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		/* free with the same dev_id that was passed to
		 * devm_request_irq() above, i.e. the q_vector pointer itself
		 */
		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
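
/* Only the causes enabled in ice_ena_misc_vector() above (ECC, malicious
 * driver detect, global reset, PCI exception, VFLR, HMC and PE critical
 * errors) can assert the OICR vector serviced by ice_misc_intr() below;
 * Tx/Rx queue interrupts arrive on the per-q_vector MSI-X entries requested
 * in ice_vsi_req_irq_msix().
 */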

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		ena_mask &= ~PFINT_OICR_VFLR_M;
		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
				reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report and mask off any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
1337 */ 1338 if (oicr & (PFINT_OICR_PE_CRITERR_M | 1339 PFINT_OICR_PCI_EXCEPTION_M | 1340 PFINT_OICR_ECC_ERR_M)) { 1341 set_bit(__ICE_PFR_REQ, pf->state); 1342 ice_service_task_schedule(pf); 1343 } 1344 ena_mask &= ~oicr; 1345 } 1346 ret = IRQ_HANDLED; 1347 1348 /* re-enable interrupt causes that are not handled during this pass */ 1349 wr32(hw, PFINT_OICR_ENA, ena_mask); 1350 if (!test_bit(__ICE_DOWN, pf->state)) { 1351 ice_service_task_schedule(pf); 1352 ice_irq_dynamic_ena(hw, NULL, NULL); 1353 } 1354 1355 return ret; 1356 } 1357 1358 /** 1359 * ice_free_irq_msix_misc - Unroll misc vector setup 1360 * @pf: board private structure 1361 */ 1362 static void ice_free_irq_msix_misc(struct ice_pf *pf) 1363 { 1364 /* disable OICR interrupt */ 1365 wr32(&pf->hw, PFINT_OICR_ENA, 0); 1366 ice_flush(&pf->hw); 1367 1368 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { 1369 synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector); 1370 devm_free_irq(&pf->pdev->dev, 1371 pf->msix_entries[pf->sw_oicr_idx].vector, pf); 1372 } 1373 1374 pf->num_avail_sw_msix += 1; 1375 ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID); 1376 pf->num_avail_hw_msix += 1; 1377 ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID); 1378 } 1379 1380 /** 1381 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 1382 * @pf: board private structure 1383 * 1384 * This sets up the handler for MSIX 0, which is used to manage the 1385 * non-queue interrupts, e.g. AdminQ and errors. This is not used 1386 * when in MSI or Legacy interrupt mode. 1387 */ 1388 static int ice_req_irq_msix_misc(struct ice_pf *pf) 1389 { 1390 struct ice_hw *hw = &pf->hw; 1391 int oicr_idx, err = 0; 1392 u8 itr_gran; 1393 u32 val; 1394 1395 if (!pf->int_name[0]) 1396 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 1397 dev_driver_string(&pf->pdev->dev), 1398 dev_name(&pf->pdev->dev)); 1399 1400 /* Do not request IRQ but do enable OICR interrupt since settings are 1401 * lost during reset. Note that this function is called only during 1402 * rebuild path and not while reset is in progress. 
1403 */ 1404 if (ice_is_reset_in_progress(pf->state)) 1405 goto skip_req_irq; 1406 1407 /* reserve one vector in sw_irq_tracker for misc interrupts */ 1408 oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1409 if (oicr_idx < 0) 1410 return oicr_idx; 1411 1412 pf->num_avail_sw_msix -= 1; 1413 pf->sw_oicr_idx = oicr_idx; 1414 1415 /* reserve one vector in hw_irq_tracker for misc interrupts */ 1416 oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1417 if (oicr_idx < 0) { 1418 ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1419 pf->num_avail_sw_msix += 1; 1420 return oicr_idx; 1421 } 1422 pf->num_avail_hw_msix -= 1; 1423 pf->hw_oicr_idx = oicr_idx; 1424 1425 err = devm_request_irq(&pf->pdev->dev, 1426 pf->msix_entries[pf->sw_oicr_idx].vector, 1427 ice_misc_intr, 0, pf->int_name, pf); 1428 if (err) { 1429 dev_err(&pf->pdev->dev, 1430 "devm_request_irq for %s failed: %d\n", 1431 pf->int_name, err); 1432 ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1433 pf->num_avail_sw_msix += 1; 1434 ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1435 pf->num_avail_hw_msix += 1; 1436 return err; 1437 } 1438 1439 skip_req_irq: 1440 ice_ena_misc_vector(pf); 1441 1442 val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 1443 PFINT_OICR_CTL_CAUSE_ENA_M); 1444 wr32(hw, PFINT_OICR_CTL, val); 1445 1446 /* This enables Admin queue Interrupt causes */ 1447 val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | 1448 PFINT_FW_CTL_CAUSE_ENA_M); 1449 wr32(hw, PFINT_FW_CTL, val); 1450 1451 /* This enables Mailbox queue Interrupt causes */ 1452 val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 1453 PFINT_MBX_CTL_CAUSE_ENA_M); 1454 wr32(hw, PFINT_MBX_CTL, val); 1455 1456 itr_gran = hw->itr_gran; 1457 1458 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), 1459 ITR_TO_REG(ICE_ITR_8K, itr_gran)); 1460 1461 ice_flush(hw); 1462 ice_irq_dynamic_ena(hw, NULL, NULL); 1463 1464 return 0; 1465 } 1466 1467 /** 1468 * ice_napi_del - Remove NAPI handler for the VSI 1469 * @vsi: VSI for which NAPI handler is to be removed 1470 */ 1471 void ice_napi_del(struct ice_vsi *vsi) 1472 { 1473 int v_idx; 1474 1475 if (!vsi->netdev) 1476 return; 1477 1478 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 1479 netif_napi_del(&vsi->q_vectors[v_idx]->napi); 1480 } 1481 1482 /** 1483 * ice_napi_add - register NAPI handler for the VSI 1484 * @vsi: VSI for which NAPI handler is to be registered 1485 * 1486 * This function is only called in the driver's load path. Registering the NAPI 1487 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 1488 * reset/rebuild, etc.) 
 */
static void ice_napi_add(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll, NAPI_POLL_WEIGHT);
}

/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
				    vsi->alloc_txq, vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA |
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM |
			 NETIF_F_IP_CSUM |
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;

	tso_features = NETIF_F_TSO;

	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* enable features */
	netdev->features |= netdev->hw_features;
	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);

		ether_addr_copy(netdev->dev_addr, mac_addr);
		ether_addr_copy(netdev->perm_addr, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* assign netdev_ops */
	netdev->netdev_ops = &ice_netdev_ops;

	/* setup watchdog timeout value to be 5 second */
	netdev->watchdog_timeo = 5 * HZ;

	ice_set_ethtool_ops(netdev);

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = ICE_MAX_MTU;

	err = register_netdev(vsi->netdev);
	if (err)
		return err;

	netif_carrier_off(vsi->netdev);

	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(vsi->netdev);

	return 0;
}

/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
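
/* For example, with rss_table_size = 8 and rss_size = 3 the LUT above is
 * filled as { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading hash buckets round-robin
 * across the first three Rx queues.
 */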

/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI sw struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
}

/**
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: VLAN ID to be added
 *
 * net_device_ops implementation for adding VLAN IDs
 */
static int ice_vlan_rx_add_vid(struct net_device *netdev,
			       __always_unused __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (vid >= VLAN_N_VID) {
		netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
			   vid, VLAN_N_VID);
		return -EINVAL;
	}

	if (vsi->info.pvid)
		return -EINVAL;

	/* Enable VLAN pruning when VLAN 0 is added */
	if (unlikely(!vid)) {
		int ret = ice_cfg_vlan_pruning(vsi, true);

		if (ret)
			return ret;
	}

	/* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
	 * needed to continue allowing all untagged packets since VLAN prune
	 * list is applied to all packets by the switch
	 */
	return ice_vsi_add_vlan(vsi, vid);
}

/**
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: VLAN ID to be removed
 *
 * net_device_ops implementation for removing VLAN IDs
 */
static int ice_vlan_rx_kill_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int status;

	if (vsi->info.pvid)
		return -EINVAL;

	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
	 * information
	 */
	status = ice_vsi_kill_vlan(vsi, vid);
	if (status)
		return status;

	/* Disable VLAN pruning when VLAN 0 is removed */
	if (unlikely(!vid))
		status = ice_cfg_vlan_pruning(vsi, false);

	return status;
}
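
/* Note the pairing in the two callbacks above: VLAN pruning is switched on
 * when VLAN ID 0 is added and back off when VLAN ID 0 is removed, and VLAN
 * ID 0 is still programmed into the switch filter so untagged and
 * priority-tagged traffic keeps flowing while the prune list is applied.
 */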

/**
 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_setup_pf_sw(struct ice_pf *pf)
{
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	int status = 0;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi) {
		status = -ENOMEM;
		goto unroll_vsi_setup;
	}

	status = ice_cfg_netdev(vsi);
	if (status) {
		status = -ENODEV;
		goto unroll_vsi_setup;
	}

	/* registering the NAPI handler requires both the queues and
	 * netdev to be created, which are done in ice_pf_vsi_setup()
	 * and ice_cfg_netdev() respectively
	 */
	ice_napi_add(vsi);

	/* To add a MAC filter, first add the MAC to a list and then
	 * pass the list to ice_add_mac.
	 */

	/* Add a unicast MAC filter so the VSI can get its packets */
	status = ice_add_mac_to_list(vsi, &tmp_add_list,
				     vsi->port_info->mac.perm_addr);
	if (status)
		goto unroll_napi_add;

	/* VSI needs to receive broadcast traffic, so add the broadcast
	 * MAC address to the list as well.
	 */
	eth_broadcast_addr(broadcast);
	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto free_mac_list;

	/* program MAC filters for entries in tmp_add_list */
	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status) {
		dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
		status = -ENOMEM;
		goto free_mac_list;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;

free_mac_list:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);

unroll_napi_add:
	if (vsi) {
		ice_napi_del(vsi);
		if (vsi->netdev) {
			if (vsi->netdev->reg_state == NETREG_REGISTERED)
				unregister_netdev(vsi->netdev);
			free_netdev(vsi->netdev);
			vsi->netdev = NULL;
		}
	}

unroll_vsi_setup:
	if (vsi) {
		ice_vsi_free_q_vectors(vsi);
		ice_vsi_delete(vsi);
		ice_vsi_put_qs(vsi);
		pf->q_left_tx += vsi->alloc_txq;
		pf->q_left_rx += vsi->alloc_rxq;
		ice_vsi_clear(vsi);
	}
	return status;
}

/**
 * ice_determine_q_usage - Calculate queue distribution
 * @pf: board private structure
 *
 * Return -ENOMEM if we don't get enough queues for all ports
 */
static void ice_determine_q_usage(struct ice_pf *pf)
{
	u16 q_left_tx, q_left_rx;

	q_left_tx = pf->hw.func_caps.common_cap.num_txq;
	q_left_rx = pf->hw.func_caps.common_cap.num_rxq;

	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());

	/* only 1 Rx queue unless RSS is enabled */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		pf->num_lan_rx = 1;
	else
		pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());

	pf->q_left_tx = q_left_tx - pf->num_lan_tx;
	pf->q_left_rx = q_left_rx - pf->num_lan_rx;
}

/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to initialize
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	ice_service_task_stop(pf);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->avail_q_mutex);
}

/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static void ice_init_pf(struct ice_pf *pf)
{
	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
	set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
		struct ice_hw *hw = &pf->hw;

		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
					      ICE_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->avail_q_mutex);

	/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
	mutex_lock(&pf->avail_q_mutex);
	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
	mutex_unlock(&pf->avail_q_mutex);

	if (pf->hw.func_caps.common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
 * @pf: board private structure
 *
 * Compute the number of MSIX vectors required (v_budget) and request from
 * the OS. Return the number of vectors reserved or negative on failure
 */
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int v_left, v_actual, v_budget = 0;
	int needed, err, i;

	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;

	/* reserve one vector for miscellaneous handler */
	needed = 1;
	v_budget += needed;
	v_left -= needed;

	/* reserve vectors for LAN traffic */
	pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
	v_budget += pf->num_lan_msix;
	v_left -= pf->num_lan_msix;

	pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);

	if (!pf->msix_entries) {
		err = -ENOMEM;
		goto exit_err;
	}

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;

	/* actually reserve the vectors */
	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					 ICE_MIN_MSIX, v_budget);

	if (v_actual < 0) {
		dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto msix_err;
	}

	if (v_actual < v_budget) {
		dev_warn(&pf->pdev->dev,
			 "not enough vectors. requested = %d, obtained = %d\n",
			 v_budget, v_actual);
		if (v_actual >= (pf->num_lan_msix + 1)) {
			pf->num_avail_sw_msix = v_actual -
						(pf->num_lan_msix + 1);
		} else if (v_actual >= 2) {
			pf->num_lan_msix = 1;
			pf->num_avail_sw_msix = v_actual - 2;
		} else {
			pci_disable_msix(pf->pdev);
			err = -ERANGE;
			goto msix_err;
		}
	}

	return v_actual;

msix_err:
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	goto exit_err;

exit_err:
	pf->num_lan_msix = 0;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
	return err;
}
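
/* Illustrative numbers for the budgeting above: on an 8-CPU system the
 * request is 1 miscellaneous/OICR vector plus 8 LAN vectors, so v_budget is
 * 9.  If the OS grants fewer than that, say 6, the driver falls back to a
 * single LAN vector (num_lan_msix = 1) and records the remaining 4 as
 * available software vectors; if fewer than 2 are granted, MSI-X is
 * abandoned altogether.
 */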

/**
 * ice_dis_msix - Disable MSI-X interrupt setup in OS
 * @pf: board private structure
 */
static void ice_dis_msix(struct ice_pf *pf)
{
	pci_disable_msix(pf->pdev);
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	pf->msix_entries = NULL;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}

/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_dis_msix(pf);

	if (pf->sw_irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
		pf->sw_irq_tracker = NULL;
	}

	if (pf->hw_irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
		pf->hw_irq_tracker = NULL;
	}
}

/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int vectors = 0, hw_vectors = 0;
	ssize_t size;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		vectors = ice_ena_msix_range(pf);
	else
		return -ENODEV;

	if (vectors < 0)
		return vectors;

	/* set up vector assignment tracking */
	size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);

	pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
	if (!pf->sw_irq_tracker) {
		ice_dis_msix(pf);
		return -ENOMEM;
	}

	/*
populate SW interrupts pool with number of OS granted IRQs. */ 1980 pf->num_avail_sw_msix = vectors; 1981 pf->sw_irq_tracker->num_entries = vectors; 1982 1983 /* set up HW vector assignment tracking */ 1984 hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 1985 size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors); 1986 1987 pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); 1988 if (!pf->hw_irq_tracker) { 1989 ice_clear_interrupt_scheme(pf); 1990 return -ENOMEM; 1991 } 1992 1993 /* populate HW interrupts pool with number of HW supported irqs. */ 1994 pf->num_avail_hw_msix = hw_vectors; 1995 pf->hw_irq_tracker->num_entries = hw_vectors; 1996 1997 return 0; 1998 } 1999 2000 /** 2001 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 2002 * @pf: pointer to the PF structure 2003 * 2004 * There is no error returned here because the driver should be able to handle 2005 * 128 Byte cache lines, so we only print a warning in case issues are seen, 2006 * specifically with Tx. 2007 */ 2008 static void ice_verify_cacheline_size(struct ice_pf *pf) 2009 { 2010 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 2011 dev_warn(&pf->pdev->dev, 2012 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 2013 ICE_CACHE_LINE_BYTES); 2014 } 2015 2016 /** 2017 * ice_probe - Device initialization routine 2018 * @pdev: PCI device information struct 2019 * @ent: entry in ice_pci_tbl 2020 * 2021 * Returns 0 on success, negative on failure 2022 */ 2023 static int ice_probe(struct pci_dev *pdev, 2024 const struct pci_device_id __always_unused *ent) 2025 { 2026 struct ice_pf *pf; 2027 struct ice_hw *hw; 2028 int err; 2029 2030 /* this driver uses devres, see Documentation/driver-model/devres.txt */ 2031 err = pcim_enable_device(pdev); 2032 if (err) 2033 return err; 2034 2035 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); 2036 if (err) { 2037 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); 2038 return err; 2039 } 2040 2041 pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL); 2042 if (!pf) 2043 return -ENOMEM; 2044 2045 /* set up for high or low dma */ 2046 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2047 if (err) 2048 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2049 if (err) { 2050 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); 2051 return err; 2052 } 2053 2054 pci_enable_pcie_error_reporting(pdev); 2055 pci_set_master(pdev); 2056 2057 pf->pdev = pdev; 2058 pci_set_drvdata(pdev, pf); 2059 set_bit(__ICE_DOWN, pf->state); 2060 /* Disable service task until DOWN bit is cleared */ 2061 set_bit(__ICE_SERVICE_DIS, pf->state); 2062 2063 hw = &pf->hw; 2064 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 2065 hw->back = pf; 2066 hw->vendor_id = pdev->vendor; 2067 hw->device_id = pdev->device; 2068 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 2069 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2070 hw->subsystem_device_id = pdev->subsystem_device; 2071 hw->bus.device = PCI_SLOT(pdev->devfn); 2072 hw->bus.func = PCI_FUNC(pdev->devfn); 2073 ice_set_ctrlq_len(hw); 2074 2075 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 2076 2077 #ifndef CONFIG_DYNAMIC_DEBUG 2078 if (debug < -1) 2079 hw->debug_mask = debug; 2080 #endif 2081 2082 err = ice_init_hw(hw); 2083 if (err) { 2084 dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); 2085 err = -EIO; 2086 goto err_exit_unroll; 2087 } 2088 2089 dev_info(&pdev->dev, "firmware %d.%d.%05d 
api %d.%d\n", 2090 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, 2091 hw->api_maj_ver, hw->api_min_ver); 2092 2093 ice_init_pf(pf); 2094 2095 ice_determine_q_usage(pf); 2096 2097 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 2098 if (!pf->num_alloc_vsi) { 2099 err = -EIO; 2100 goto err_init_pf_unroll; 2101 } 2102 2103 pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, 2104 sizeof(struct ice_vsi *), GFP_KERNEL); 2105 if (!pf->vsi) { 2106 err = -ENOMEM; 2107 goto err_init_pf_unroll; 2108 } 2109 2110 err = ice_init_interrupt_scheme(pf); 2111 if (err) { 2112 dev_err(&pdev->dev, 2113 "ice_init_interrupt_scheme failed: %d\n", err); 2114 err = -EIO; 2115 goto err_init_interrupt_unroll; 2116 } 2117 2118 /* Driver is mostly up */ 2119 clear_bit(__ICE_DOWN, pf->state); 2120 2121 /* In case of MSIX we are going to setup the misc vector right here 2122 * to handle admin queue events etc. In case of legacy and MSI 2123 * the misc functionality and queue processing is combined in 2124 * the same vector and that gets setup at open. 2125 */ 2126 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 2127 err = ice_req_irq_msix_misc(pf); 2128 if (err) { 2129 dev_err(&pdev->dev, 2130 "setup of misc vector failed: %d\n", err); 2131 goto err_init_interrupt_unroll; 2132 } 2133 } 2134 2135 /* create switch struct for the switch element created by FW on boot */ 2136 pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), 2137 GFP_KERNEL); 2138 if (!pf->first_sw) { 2139 err = -ENOMEM; 2140 goto err_msix_misc_unroll; 2141 } 2142 2143 if (hw->evb_veb) 2144 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 2145 else 2146 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 2147 2148 pf->first_sw->pf = pf; 2149 2150 /* record the sw_id available for later use */ 2151 pf->first_sw->sw_id = hw->port_info->sw_id; 2152 2153 err = ice_setup_pf_sw(pf); 2154 if (err) { 2155 dev_err(&pdev->dev, 2156 "probe failed due to setup pf switch:%d\n", err); 2157 goto err_alloc_sw_unroll; 2158 } 2159 2160 clear_bit(__ICE_SERVICE_DIS, pf->state); 2161 2162 /* since everything is good, start the service timer */ 2163 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 2164 2165 ice_verify_cacheline_size(pf); 2166 2167 return 0; 2168 2169 err_alloc_sw_unroll: 2170 set_bit(__ICE_SERVICE_DIS, pf->state); 2171 set_bit(__ICE_DOWN, pf->state); 2172 devm_kfree(&pf->pdev->dev, pf->first_sw); 2173 err_msix_misc_unroll: 2174 ice_free_irq_msix_misc(pf); 2175 err_init_interrupt_unroll: 2176 ice_clear_interrupt_scheme(pf); 2177 devm_kfree(&pdev->dev, pf->vsi); 2178 err_init_pf_unroll: 2179 ice_deinit_pf(pf); 2180 ice_deinit_hw(hw); 2181 err_exit_unroll: 2182 pci_disable_pcie_error_reporting(pdev); 2183 return err; 2184 } 2185 2186 /** 2187 * ice_remove - Device removal routine 2188 * @pdev: PCI device information struct 2189 */ 2190 static void ice_remove(struct pci_dev *pdev) 2191 { 2192 struct ice_pf *pf = pci_get_drvdata(pdev); 2193 int i; 2194 2195 if (!pf) 2196 return; 2197 2198 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 2199 if (!ice_is_reset_in_progress(pf->state)) 2200 break; 2201 msleep(100); 2202 } 2203 2204 set_bit(__ICE_DOWN, pf->state); 2205 ice_service_task_stop(pf); 2206 2207 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) 2208 ice_free_vfs(pf); 2209 ice_vsi_release_all(pf); 2210 ice_free_irq_msix_misc(pf); 2211 ice_for_each_vsi(pf, i) { 2212 if (!pf->vsi[i]) 2213 continue; 2214 ice_vsi_free_q_vectors(pf->vsi[i]); 2215 } 2216 ice_clear_interrupt_scheme(pf); 2217 ice_deinit_pf(pf); 2218 ice_deinit_hw(&pf->hw); 2219 
pci_disable_pcie_error_reporting(pdev); 2220 } 2221 2222 /* ice_pci_tbl - PCI Device ID Table 2223 * 2224 * Wildcard entries (PCI_ANY_ID) should come last 2225 * Last entry must be all 0s 2226 * 2227 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 2228 * Class, Class Mask, private data (not used) } 2229 */ 2230 static const struct pci_device_id ice_pci_tbl[] = { 2231 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 2232 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 2233 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 2234 /* required last entry */ 2235 { 0, } 2236 }; 2237 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 2238 2239 static struct pci_driver ice_driver = { 2240 .name = KBUILD_MODNAME, 2241 .id_table = ice_pci_tbl, 2242 .probe = ice_probe, 2243 .remove = ice_remove, 2244 .sriov_configure = ice_sriov_configure, 2245 }; 2246 2247 /** 2248 * ice_module_init - Driver registration routine 2249 * 2250 * ice_module_init is the first routine called when the driver is 2251 * loaded. All it does is register with the PCI subsystem. 2252 */ 2253 static int __init ice_module_init(void) 2254 { 2255 int status; 2256 2257 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); 2258 pr_info("%s\n", ice_copyright); 2259 2260 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 2261 if (!ice_wq) { 2262 pr_err("Failed to create workqueue\n"); 2263 return -ENOMEM; 2264 } 2265 2266 status = pci_register_driver(&ice_driver); 2267 if (status) { 2268 pr_err("failed to register pci driver, err %d\n", status); 2269 destroy_workqueue(ice_wq); 2270 } 2271 2272 return status; 2273 } 2274 module_init(ice_module_init); 2275 2276 /** 2277 * ice_module_exit - Driver exit cleanup routine 2278 * 2279 * ice_module_exit is called just before the driver is removed 2280 * from memory. 2281 */ 2282 static void __exit ice_module_exit(void) 2283 { 2284 pci_unregister_driver(&ice_driver); 2285 destroy_workqueue(ice_wq); 2286 pr_info("module unloaded\n"); 2287 } 2288 module_exit(ice_module_exit); 2289 2290 /** 2291 * ice_set_mac_address - NDO callback to set mac address 2292 * @netdev: network interface device structure 2293 * @pi: pointer to an address structure 2294 * 2295 * Returns 0 on success, negative on failure 2296 */ 2297 static int ice_set_mac_address(struct net_device *netdev, void *pi) 2298 { 2299 struct ice_netdev_priv *np = netdev_priv(netdev); 2300 struct ice_vsi *vsi = np->vsi; 2301 struct ice_pf *pf = vsi->back; 2302 struct ice_hw *hw = &pf->hw; 2303 struct sockaddr *addr = pi; 2304 enum ice_status status; 2305 LIST_HEAD(a_mac_list); 2306 LIST_HEAD(r_mac_list); 2307 u8 flags = 0; 2308 int err; 2309 u8 *mac; 2310 2311 mac = (u8 *)addr->sa_data; 2312 2313 if (!is_valid_ether_addr(mac)) 2314 return -EADDRNOTAVAIL; 2315 2316 if (ether_addr_equal(netdev->dev_addr, mac)) { 2317 netdev_warn(netdev, "already using mac %pM\n", mac); 2318 return 0; 2319 } 2320 2321 if (test_bit(__ICE_DOWN, pf->state) || 2322 ice_is_reset_in_progress(pf->state)) { 2323 netdev_err(netdev, "can't set mac %pM. device not ready\n", 2324 mac); 2325 return -EBUSY; 2326 } 2327 2328 /* When we change the mac address we also have to change the mac address 2329 * based filter rules that were created previously for the old mac 2330 * address. So first, we remove the old filter rule using ice_remove_mac 2331 * and then create a new filter rule using ice_add_mac. 
Note that for 2332 * both these operations, we first need to form a "list" of mac 2333 * addresses (even though in this case, we have only 1 mac address to be 2334 * added/removed) and this done using ice_add_mac_to_list. Depending on 2335 * the ensuing operation this "list" of mac addresses is either to be 2336 * added or removed from the filter. 2337 */ 2338 err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); 2339 if (err) { 2340 err = -EADDRNOTAVAIL; 2341 goto free_lists; 2342 } 2343 2344 status = ice_remove_mac(hw, &r_mac_list); 2345 if (status) { 2346 err = -EADDRNOTAVAIL; 2347 goto free_lists; 2348 } 2349 2350 err = ice_add_mac_to_list(vsi, &a_mac_list, mac); 2351 if (err) { 2352 err = -EADDRNOTAVAIL; 2353 goto free_lists; 2354 } 2355 2356 status = ice_add_mac(hw, &a_mac_list); 2357 if (status) { 2358 err = -EADDRNOTAVAIL; 2359 goto free_lists; 2360 } 2361 2362 free_lists: 2363 /* free list entries */ 2364 ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); 2365 ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); 2366 2367 if (err) { 2368 netdev_err(netdev, "can't set mac %pM. filter update failed\n", 2369 mac); 2370 return err; 2371 } 2372 2373 /* change the netdev's mac address */ 2374 memcpy(netdev->dev_addr, mac, netdev->addr_len); 2375 netdev_dbg(vsi->netdev, "updated mac address to %pM\n", 2376 netdev->dev_addr); 2377 2378 /* write new mac address to the firmware */ 2379 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 2380 status = ice_aq_manage_mac_write(hw, mac, flags, NULL); 2381 if (status) { 2382 netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n", 2383 mac); 2384 } 2385 return 0; 2386 } 2387 2388 /** 2389 * ice_set_rx_mode - NDO callback to set the netdev filters 2390 * @netdev: network interface device structure 2391 */ 2392 static void ice_set_rx_mode(struct net_device *netdev) 2393 { 2394 struct ice_netdev_priv *np = netdev_priv(netdev); 2395 struct ice_vsi *vsi = np->vsi; 2396 2397 if (!vsi) 2398 return; 2399 2400 /* Set the flags to synchronize filters 2401 * ndo_set_rx_mode may be triggered even without a change in netdev 2402 * flags 2403 */ 2404 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 2405 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 2406 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 2407 2408 /* schedule our worker thread which will take care of 2409 * applying the new filter changes 2410 */ 2411 ice_service_task_schedule(vsi->back); 2412 } 2413 2414 /** 2415 * ice_fdb_add - add an entry to the hardware database 2416 * @ndm: the input from the stack 2417 * @tb: pointer to array of nladdr (unused) 2418 * @dev: the net device pointer 2419 * @addr: the MAC address entry being added 2420 * @vid: VLAN id 2421 * @flags: instructions from stack about fdb operation 2422 */ 2423 static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 2424 struct net_device *dev, const unsigned char *addr, 2425 u16 vid, u16 flags) 2426 { 2427 int err; 2428 2429 if (vid) { 2430 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 2431 return -EINVAL; 2432 } 2433 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 2434 netdev_err(dev, "FDB only supports static addresses\n"); 2435 return -EINVAL; 2436 } 2437 2438 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 2439 err = dev_uc_add_excl(dev, addr); 2440 else if (is_multicast_ether_addr(addr)) 2441 err = dev_mc_add_excl(dev, addr); 2442 else 2443 err = -EINVAL; 2444 2445 /* Only return duplicate errors if NLM_F_EXCL is set */ 2446 if (err == 
-EEXIST && !(flags & NLM_F_EXCL)) 2447 err = 0; 2448 2449 return err; 2450 } 2451 2452 /** 2453 * ice_fdb_del - delete an entry from the hardware database 2454 * @ndm: the input from the stack 2455 * @tb: pointer to array of nladdr (unused) 2456 * @dev: the net device pointer 2457 * @addr: the MAC address entry being added 2458 * @vid: VLAN id 2459 */ 2460 static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 2461 struct net_device *dev, const unsigned char *addr, 2462 __always_unused u16 vid) 2463 { 2464 int err; 2465 2466 if (ndm->ndm_state & NUD_PERMANENT) { 2467 netdev_err(dev, "FDB only supports static addresses\n"); 2468 return -EINVAL; 2469 } 2470 2471 if (is_unicast_ether_addr(addr)) 2472 err = dev_uc_del(dev, addr); 2473 else if (is_multicast_ether_addr(addr)) 2474 err = dev_mc_del(dev, addr); 2475 else 2476 err = -EINVAL; 2477 2478 return err; 2479 } 2480 2481 /** 2482 * ice_set_features - set the netdev feature flags 2483 * @netdev: ptr to the netdev being adjusted 2484 * @features: the feature set that the stack is suggesting 2485 */ 2486 static int ice_set_features(struct net_device *netdev, 2487 netdev_features_t features) 2488 { 2489 struct ice_netdev_priv *np = netdev_priv(netdev); 2490 struct ice_vsi *vsi = np->vsi; 2491 int ret = 0; 2492 2493 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 2494 ret = ice_vsi_manage_rss_lut(vsi, true); 2495 else if (!(features & NETIF_F_RXHASH) && 2496 netdev->features & NETIF_F_RXHASH) 2497 ret = ice_vsi_manage_rss_lut(vsi, false); 2498 2499 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && 2500 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 2501 ret = ice_vsi_manage_vlan_stripping(vsi, true); 2502 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && 2503 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 2504 ret = ice_vsi_manage_vlan_stripping(vsi, false); 2505 else if ((features & NETIF_F_HW_VLAN_CTAG_TX) && 2506 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 2507 ret = ice_vsi_manage_vlan_insertion(vsi); 2508 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && 2509 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 2510 ret = ice_vsi_manage_vlan_insertion(vsi); 2511 2512 return ret; 2513 } 2514 2515 /** 2516 * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI 2517 * @vsi: VSI to setup vlan properties for 2518 */ 2519 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 2520 { 2521 int ret = 0; 2522 2523 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 2524 ret = ice_vsi_manage_vlan_stripping(vsi, true); 2525 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 2526 ret = ice_vsi_manage_vlan_insertion(vsi); 2527 2528 return ret; 2529 } 2530 2531 /** 2532 * ice_vsi_cfg - Setup the VSI 2533 * @vsi: the VSI being configured 2534 * 2535 * Return 0 on success and negative value on error 2536 */ 2537 static int ice_vsi_cfg(struct ice_vsi *vsi) 2538 { 2539 int err; 2540 2541 if (vsi->netdev) { 2542 ice_set_rx_mode(vsi->netdev); 2543 2544 err = ice_vsi_vlan_setup(vsi); 2545 2546 if (err) 2547 return err; 2548 } 2549 err = ice_vsi_cfg_txqs(vsi); 2550 if (!err) 2551 err = ice_vsi_cfg_rxqs(vsi); 2552 2553 return err; 2554 } 2555 2556 /** 2557 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 2558 * @vsi: the VSI being configured 2559 */ 2560 static void ice_napi_enable_all(struct ice_vsi *vsi) 2561 { 2562 int q_idx; 2563 2564 if (!vsi->netdev) 2565 return; 2566 2567 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 2568 napi_enable(&vsi->q_vectors[q_idx]->napi); 2569 } 2570 2571 /** 2572 * 
ice_up_complete - Finish the last steps of bringing up a connection 2573 * @vsi: The VSI being configured 2574 * 2575 * Return 0 on success and negative value on error 2576 */ 2577 static int ice_up_complete(struct ice_vsi *vsi) 2578 { 2579 struct ice_pf *pf = vsi->back; 2580 int err; 2581 2582 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 2583 ice_vsi_cfg_msix(vsi); 2584 else 2585 return -ENOTSUPP; 2586 2587 /* Enable only Rx rings, Tx rings were enabled by the FW when the 2588 * Tx queue group list was configured and the context bits were 2589 * programmed using ice_vsi_cfg_txqs 2590 */ 2591 err = ice_vsi_start_rx_rings(vsi); 2592 if (err) 2593 return err; 2594 2595 clear_bit(__ICE_DOWN, vsi->state); 2596 ice_napi_enable_all(vsi); 2597 ice_vsi_ena_irq(vsi); 2598 2599 if (vsi->port_info && 2600 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 2601 vsi->netdev) { 2602 ice_print_link_msg(vsi, true); 2603 netif_tx_start_all_queues(vsi->netdev); 2604 netif_carrier_on(vsi->netdev); 2605 } 2606 2607 ice_service_task_schedule(pf); 2608 2609 return err; 2610 } 2611 2612 /** 2613 * ice_up - Bring the connection back up after being down 2614 * @vsi: VSI being configured 2615 */ 2616 int ice_up(struct ice_vsi *vsi) 2617 { 2618 int err; 2619 2620 err = ice_vsi_cfg(vsi); 2621 if (!err) 2622 err = ice_up_complete(vsi); 2623 2624 return err; 2625 } 2626 2627 /** 2628 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 2629 * @ring: Tx or Rx ring to read stats from 2630 * @pkts: packets stats counter 2631 * @bytes: bytes stats counter 2632 * 2633 * This function fetches stats from the ring considering the atomic operations 2634 * that needs to be performed to read u64 values in 32 bit machine. 2635 */ 2636 static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, 2637 u64 *bytes) 2638 { 2639 unsigned int start; 2640 *pkts = 0; 2641 *bytes = 0; 2642 2643 if (!ring) 2644 return; 2645 do { 2646 start = u64_stats_fetch_begin_irq(&ring->syncp); 2647 *pkts = ring->stats.pkts; 2648 *bytes = ring->stats.bytes; 2649 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 2650 } 2651 2652 /** 2653 * ice_update_vsi_ring_stats - Update VSI stats counters 2654 * @vsi: the VSI to be updated 2655 */ 2656 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 2657 { 2658 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 2659 struct ice_ring *ring; 2660 u64 pkts, bytes; 2661 int i; 2662 2663 /* reset netdev stats */ 2664 vsi_stats->tx_packets = 0; 2665 vsi_stats->tx_bytes = 0; 2666 vsi_stats->rx_packets = 0; 2667 vsi_stats->rx_bytes = 0; 2668 2669 /* reset non-netdev (extended) stats */ 2670 vsi->tx_restart = 0; 2671 vsi->tx_busy = 0; 2672 vsi->tx_linearize = 0; 2673 vsi->rx_buf_failed = 0; 2674 vsi->rx_page_failed = 0; 2675 2676 rcu_read_lock(); 2677 2678 /* update Tx rings counters */ 2679 ice_for_each_txq(vsi, i) { 2680 ring = READ_ONCE(vsi->tx_rings[i]); 2681 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 2682 vsi_stats->tx_packets += pkts; 2683 vsi_stats->tx_bytes += bytes; 2684 vsi->tx_restart += ring->tx_stats.restart_q; 2685 vsi->tx_busy += ring->tx_stats.tx_busy; 2686 vsi->tx_linearize += ring->tx_stats.tx_linearize; 2687 } 2688 2689 /* update Rx rings counters */ 2690 ice_for_each_rxq(vsi, i) { 2691 ring = READ_ONCE(vsi->rx_rings[i]); 2692 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 2693 vsi_stats->rx_packets += pkts; 2694 vsi_stats->rx_bytes += bytes; 2695 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 2696 vsi->rx_page_failed += 
ring->rx_stats.alloc_page_failed; 2697 } 2698 2699 rcu_read_unlock(); 2700 } 2701 2702 /** 2703 * ice_update_vsi_stats - Update VSI stats counters 2704 * @vsi: the VSI to be updated 2705 */ 2706 static void ice_update_vsi_stats(struct ice_vsi *vsi) 2707 { 2708 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 2709 struct ice_eth_stats *cur_es = &vsi->eth_stats; 2710 struct ice_pf *pf = vsi->back; 2711 2712 if (test_bit(__ICE_DOWN, vsi->state) || 2713 test_bit(__ICE_CFG_BUSY, pf->state)) 2714 return; 2715 2716 /* get stats as recorded by Tx/Rx rings */ 2717 ice_update_vsi_ring_stats(vsi); 2718 2719 /* get VSI stats as recorded by the hardware */ 2720 ice_update_eth_stats(vsi); 2721 2722 cur_ns->tx_errors = cur_es->tx_errors; 2723 cur_ns->rx_dropped = cur_es->rx_discards; 2724 cur_ns->tx_dropped = cur_es->tx_discards; 2725 cur_ns->multicast = cur_es->rx_multicast; 2726 2727 /* update some more netdev stats if this is main VSI */ 2728 if (vsi->type == ICE_VSI_PF) { 2729 cur_ns->rx_crc_errors = pf->stats.crc_errors; 2730 cur_ns->rx_errors = pf->stats.crc_errors + 2731 pf->stats.illegal_bytes; 2732 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 2733 } 2734 } 2735 2736 /** 2737 * ice_update_pf_stats - Update PF port stats counters 2738 * @pf: PF whose stats needs to be updated 2739 */ 2740 static void ice_update_pf_stats(struct ice_pf *pf) 2741 { 2742 struct ice_hw_port_stats *prev_ps, *cur_ps; 2743 struct ice_hw *hw = &pf->hw; 2744 u8 pf_id; 2745 2746 prev_ps = &pf->stats_prev; 2747 cur_ps = &pf->stats; 2748 pf_id = hw->pf_id; 2749 2750 ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), 2751 pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, 2752 &cur_ps->eth.rx_bytes); 2753 2754 ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), 2755 pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, 2756 &cur_ps->eth.rx_unicast); 2757 2758 ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), 2759 pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, 2760 &cur_ps->eth.rx_multicast); 2761 2762 ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), 2763 pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, 2764 &cur_ps->eth.rx_broadcast); 2765 2766 ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), 2767 pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, 2768 &cur_ps->eth.tx_bytes); 2769 2770 ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), 2771 pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, 2772 &cur_ps->eth.tx_unicast); 2773 2774 ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), 2775 pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, 2776 &cur_ps->eth.tx_multicast); 2777 2778 ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), 2779 pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, 2780 &cur_ps->eth.tx_broadcast); 2781 2782 ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, 2783 &prev_ps->tx_dropped_link_down, 2784 &cur_ps->tx_dropped_link_down); 2785 2786 ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), 2787 pf->stat_prev_loaded, &prev_ps->rx_size_64, 2788 &cur_ps->rx_size_64); 2789 2790 ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), 2791 pf->stat_prev_loaded, &prev_ps->rx_size_127, 2792 &cur_ps->rx_size_127); 2793 2794 ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), 2795 pf->stat_prev_loaded, &prev_ps->rx_size_255, 2796 &cur_ps->rx_size_255); 2797 2798 ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), 2799 pf->stat_prev_loaded, &prev_ps->rx_size_511, 2800 
&cur_ps->rx_size_511); 2801 2802 ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), 2803 GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, 2804 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 2805 2806 ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), 2807 GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, 2808 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 2809 2810 ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), 2811 GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, 2812 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 2813 2814 ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), 2815 pf->stat_prev_loaded, &prev_ps->tx_size_64, 2816 &cur_ps->tx_size_64); 2817 2818 ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), 2819 pf->stat_prev_loaded, &prev_ps->tx_size_127, 2820 &cur_ps->tx_size_127); 2821 2822 ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), 2823 pf->stat_prev_loaded, &prev_ps->tx_size_255, 2824 &cur_ps->tx_size_255); 2825 2826 ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), 2827 pf->stat_prev_loaded, &prev_ps->tx_size_511, 2828 &cur_ps->tx_size_511); 2829 2830 ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), 2831 GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, 2832 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 2833 2834 ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), 2835 GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, 2836 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 2837 2838 ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), 2839 GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, 2840 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 2841 2842 ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, 2843 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 2844 2845 ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, 2846 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 2847 2848 ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, 2849 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 2850 2851 ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, 2852 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 2853 2854 ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, 2855 &prev_ps->crc_errors, &cur_ps->crc_errors); 2856 2857 ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, 2858 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 2859 2860 ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, 2861 &prev_ps->mac_local_faults, 2862 &cur_ps->mac_local_faults); 2863 2864 ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, 2865 &prev_ps->mac_remote_faults, 2866 &cur_ps->mac_remote_faults); 2867 2868 ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, 2869 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 2870 2871 ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, 2872 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 2873 2874 ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, 2875 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 2876 2877 ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, 2878 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 2879 2880 ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, 2881 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 2882 2883 pf->stat_prev_loaded = true; 2884 } 2885 2886 /** 2887 * ice_get_stats64 - get statistics for network device structure 2888 * @netdev: network interface device structure 2889 * @stats: main device statistics structure 2890 */ 2891 static 2892 void ice_get_stats64(struct net_device *netdev, 
struct rtnl_link_stats64 *stats) 2893 { 2894 struct ice_netdev_priv *np = netdev_priv(netdev); 2895 struct rtnl_link_stats64 *vsi_stats; 2896 struct ice_vsi *vsi = np->vsi; 2897 2898 vsi_stats = &vsi->net_stats; 2899 2900 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) 2901 return; 2902 /* netdev packet/byte stats come from ring counter. These are obtained 2903 * by summing up ring counters (done by ice_update_vsi_ring_stats). 2904 */ 2905 ice_update_vsi_ring_stats(vsi); 2906 stats->tx_packets = vsi_stats->tx_packets; 2907 stats->tx_bytes = vsi_stats->tx_bytes; 2908 stats->rx_packets = vsi_stats->rx_packets; 2909 stats->rx_bytes = vsi_stats->rx_bytes; 2910 2911 /* The rest of the stats can be read from the hardware but instead we 2912 * just return values that the watchdog task has already obtained from 2913 * the hardware. 2914 */ 2915 stats->multicast = vsi_stats->multicast; 2916 stats->tx_errors = vsi_stats->tx_errors; 2917 stats->tx_dropped = vsi_stats->tx_dropped; 2918 stats->rx_errors = vsi_stats->rx_errors; 2919 stats->rx_dropped = vsi_stats->rx_dropped; 2920 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 2921 stats->rx_length_errors = vsi_stats->rx_length_errors; 2922 } 2923 2924 /** 2925 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 2926 * @vsi: VSI having NAPI disabled 2927 */ 2928 static void ice_napi_disable_all(struct ice_vsi *vsi) 2929 { 2930 int q_idx; 2931 2932 if (!vsi->netdev) 2933 return; 2934 2935 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 2936 napi_disable(&vsi->q_vectors[q_idx]->napi); 2937 } 2938 2939 /** 2940 * ice_down - Shutdown the connection 2941 * @vsi: The VSI being stopped 2942 */ 2943 int ice_down(struct ice_vsi *vsi) 2944 { 2945 int i, tx_err, rx_err; 2946 2947 /* Caller of this function is expected to set the 2948 * vsi->state __ICE_DOWN bit 2949 */ 2950 if (vsi->netdev) { 2951 netif_carrier_off(vsi->netdev); 2952 netif_tx_disable(vsi->netdev); 2953 } 2954 2955 ice_vsi_dis_irq(vsi); 2956 tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0); 2957 if (tx_err) 2958 netdev_err(vsi->netdev, 2959 "Failed stop Tx rings, VSI %d error %d\n", 2960 vsi->vsi_num, tx_err); 2961 2962 rx_err = ice_vsi_stop_rx_rings(vsi); 2963 if (rx_err) 2964 netdev_err(vsi->netdev, 2965 "Failed stop Rx rings, VSI %d error %d\n", 2966 vsi->vsi_num, rx_err); 2967 2968 ice_napi_disable_all(vsi); 2969 2970 ice_for_each_txq(vsi, i) 2971 ice_clean_tx_ring(vsi->tx_rings[i]); 2972 2973 ice_for_each_rxq(vsi, i) 2974 ice_clean_rx_ring(vsi->rx_rings[i]); 2975 2976 if (tx_err || rx_err) { 2977 netdev_err(vsi->netdev, 2978 "Failed to close VSI 0x%04X on switch 0x%04X\n", 2979 vsi->vsi_num, vsi->vsw->sw_id); 2980 return -EIO; 2981 } 2982 2983 return 0; 2984 } 2985 2986 /** 2987 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 2988 * @vsi: VSI having resources allocated 2989 * 2990 * Return 0 on success, negative on failure 2991 */ 2992 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 2993 { 2994 int i, err = 0; 2995 2996 if (!vsi->num_txq) { 2997 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", 2998 vsi->vsi_num); 2999 return -EINVAL; 3000 } 3001 3002 ice_for_each_txq(vsi, i) { 3003 vsi->tx_rings[i]->netdev = vsi->netdev; 3004 err = ice_setup_tx_ring(vsi->tx_rings[i]); 3005 if (err) 3006 break; 3007 } 3008 3009 return err; 3010 } 3011 3012 /** 3013 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 3014 * @vsi: VSI having resources allocated 3015 * 3016 * Return 0 on success, negative on failure 3017 */ 3018 
static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 3019 { 3020 int i, err = 0; 3021 3022 if (!vsi->num_rxq) { 3023 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", 3024 vsi->vsi_num); 3025 return -EINVAL; 3026 } 3027 3028 ice_for_each_rxq(vsi, i) { 3029 vsi->rx_rings[i]->netdev = vsi->netdev; 3030 err = ice_setup_rx_ring(vsi->rx_rings[i]); 3031 if (err) 3032 break; 3033 } 3034 3035 return err; 3036 } 3037 3038 /** 3039 * ice_vsi_req_irq - Request IRQ from the OS 3040 * @vsi: The VSI IRQ is being requested for 3041 * @basename: name for the vector 3042 * 3043 * Return 0 on success and a negative value on error 3044 */ 3045 static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) 3046 { 3047 struct ice_pf *pf = vsi->back; 3048 int err = -EINVAL; 3049 3050 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3051 err = ice_vsi_req_irq_msix(vsi, basename); 3052 3053 return err; 3054 } 3055 3056 /** 3057 * ice_vsi_open - Called when a network interface is made active 3058 * @vsi: the VSI to open 3059 * 3060 * Initialization of the VSI 3061 * 3062 * Returns 0 on success, negative value on error 3063 */ 3064 static int ice_vsi_open(struct ice_vsi *vsi) 3065 { 3066 char int_name[ICE_INT_NAME_STR_LEN]; 3067 struct ice_pf *pf = vsi->back; 3068 int err; 3069 3070 /* allocate descriptors */ 3071 err = ice_vsi_setup_tx_rings(vsi); 3072 if (err) 3073 goto err_setup_tx; 3074 3075 err = ice_vsi_setup_rx_rings(vsi); 3076 if (err) 3077 goto err_setup_rx; 3078 3079 err = ice_vsi_cfg(vsi); 3080 if (err) 3081 goto err_setup_rx; 3082 3083 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 3084 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); 3085 err = ice_vsi_req_irq(vsi, int_name); 3086 if (err) 3087 goto err_setup_rx; 3088 3089 /* Notify the stack of the actual queue counts. 
*/ 3090 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 3091 if (err) 3092 goto err_set_qs; 3093 3094 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 3095 if (err) 3096 goto err_set_qs; 3097 3098 err = ice_up_complete(vsi); 3099 if (err) 3100 goto err_up_complete; 3101 3102 return 0; 3103 3104 err_up_complete: 3105 ice_down(vsi); 3106 err_set_qs: 3107 ice_vsi_free_irq(vsi); 3108 err_setup_rx: 3109 ice_vsi_free_rx_rings(vsi); 3110 err_setup_tx: 3111 ice_vsi_free_tx_rings(vsi); 3112 3113 return err; 3114 } 3115 3116 /** 3117 * ice_vsi_release_all - Delete all VSIs 3118 * @pf: PF from which all VSIs are being removed 3119 */ 3120 static void ice_vsi_release_all(struct ice_pf *pf) 3121 { 3122 int err, i; 3123 3124 if (!pf->vsi) 3125 return; 3126 3127 for (i = 0; i < pf->num_alloc_vsi; i++) { 3128 if (!pf->vsi[i]) 3129 continue; 3130 3131 err = ice_vsi_release(pf->vsi[i]); 3132 if (err) 3133 dev_dbg(&pf->pdev->dev, 3134 "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 3135 i, err, pf->vsi[i]->vsi_num); 3136 } 3137 } 3138 3139 /** 3140 * ice_dis_vsi - pause a VSI 3141 * @vsi: the VSI being paused 3142 * @locked: is the rtnl_lock already held 3143 */ 3144 static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 3145 { 3146 if (test_bit(__ICE_DOWN, vsi->state)) 3147 return; 3148 3149 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3150 3151 if (vsi->type == ICE_VSI_PF && vsi->netdev) { 3152 if (netif_running(vsi->netdev)) { 3153 if (!locked) { 3154 rtnl_lock(); 3155 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3156 rtnl_unlock(); 3157 } else { 3158 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3159 } 3160 } else { 3161 ice_vsi_close(vsi); 3162 } 3163 } 3164 } 3165 3166 /** 3167 * ice_ena_vsi - resume a VSI 3168 * @vsi: the VSI being resume 3169 */ 3170 static int ice_ena_vsi(struct ice_vsi *vsi) 3171 { 3172 int err = 0; 3173 3174 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && 3175 vsi->netdev) { 3176 if (netif_running(vsi->netdev)) { 3177 rtnl_lock(); 3178 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3179 rtnl_unlock(); 3180 } else { 3181 err = ice_vsi_open(vsi); 3182 } 3183 } 3184 3185 return err; 3186 } 3187 3188 /** 3189 * ice_pf_dis_all_vsi - Pause all VSIs on a PF 3190 * @pf: the PF 3191 */ 3192 static void ice_pf_dis_all_vsi(struct ice_pf *pf) 3193 { 3194 int v; 3195 3196 ice_for_each_vsi(pf, v) 3197 if (pf->vsi[v]) 3198 ice_dis_vsi(pf->vsi[v], false); 3199 } 3200 3201 /** 3202 * ice_pf_ena_all_vsi - Resume all VSIs on a PF 3203 * @pf: the PF 3204 */ 3205 static int ice_pf_ena_all_vsi(struct ice_pf *pf) 3206 { 3207 int v; 3208 3209 ice_for_each_vsi(pf, v) 3210 if (pf->vsi[v]) 3211 if (ice_ena_vsi(pf->vsi[v])) 3212 return -EIO; 3213 3214 return 0; 3215 } 3216 3217 /** 3218 * ice_vsi_rebuild_all - rebuild all VSIs in pf 3219 * @pf: the PF 3220 */ 3221 static int ice_vsi_rebuild_all(struct ice_pf *pf) 3222 { 3223 int i; 3224 3225 /* loop through pf->vsi array and reinit the VSI if found */ 3226 for (i = 0; i < pf->num_alloc_vsi; i++) { 3227 int err; 3228 3229 if (!pf->vsi[i]) 3230 continue; 3231 3232 /* VF VSI rebuild isn't supported yet */ 3233 if (pf->vsi[i]->type == ICE_VSI_VF) 3234 continue; 3235 3236 err = ice_vsi_rebuild(pf->vsi[i]); 3237 if (err) { 3238 dev_err(&pf->pdev->dev, 3239 "VSI at index %d rebuild failed\n", 3240 pf->vsi[i]->idx); 3241 return err; 3242 } 3243 3244 dev_info(&pf->pdev->dev, 3245 "VSI at index %d rebuilt. 
vsi_num = 0x%x\n", 3246 pf->vsi[i]->idx, pf->vsi[i]->vsi_num); 3247 } 3248 3249 return 0; 3250 } 3251 3252 /** 3253 * ice_vsi_replay_all - replay all VSIs configuration in the PF 3254 * @pf: the PF 3255 */ 3256 static int ice_vsi_replay_all(struct ice_pf *pf) 3257 { 3258 struct ice_hw *hw = &pf->hw; 3259 enum ice_status ret; 3260 int i; 3261 3262 /* loop through pf->vsi array and replay the VSI if found */ 3263 for (i = 0; i < pf->num_alloc_vsi; i++) { 3264 if (!pf->vsi[i]) 3265 continue; 3266 3267 ret = ice_replay_vsi(hw, pf->vsi[i]->idx); 3268 if (ret) { 3269 dev_err(&pf->pdev->dev, 3270 "VSI at index %d replay failed %d\n", 3271 pf->vsi[i]->idx, ret); 3272 return -EIO; 3273 } 3274 3275 /* Re-map HW VSI number, using VSI handle that has been 3276 * previously validated in ice_replay_vsi() call above 3277 */ 3278 pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); 3279 3280 dev_info(&pf->pdev->dev, 3281 "VSI at index %d filter replayed successfully - vsi_num %i\n", 3282 pf->vsi[i]->idx, pf->vsi[i]->vsi_num); 3283 } 3284 3285 /* Clean up replay filter after successful re-configuration */ 3286 ice_replay_post(hw); 3287 return 0; 3288 } 3289 3290 /** 3291 * ice_rebuild - rebuild after reset 3292 * @pf: pf to rebuild 3293 */ 3294 static void ice_rebuild(struct ice_pf *pf) 3295 { 3296 struct device *dev = &pf->pdev->dev; 3297 struct ice_hw *hw = &pf->hw; 3298 enum ice_status ret; 3299 int err, i; 3300 3301 if (test_bit(__ICE_DOWN, pf->state)) 3302 goto clear_recovery; 3303 3304 dev_dbg(dev, "rebuilding pf\n"); 3305 3306 ret = ice_init_all_ctrlq(hw); 3307 if (ret) { 3308 dev_err(dev, "control queues init failed %d\n", ret); 3309 goto err_init_ctrlq; 3310 } 3311 3312 ret = ice_clear_pf_cfg(hw); 3313 if (ret) { 3314 dev_err(dev, "clear PF configuration failed %d\n", ret); 3315 goto err_init_ctrlq; 3316 } 3317 3318 ice_clear_pxe_mode(hw); 3319 3320 ret = ice_get_caps(hw); 3321 if (ret) { 3322 dev_err(dev, "ice_get_caps failed %d\n", ret); 3323 goto err_init_ctrlq; 3324 } 3325 3326 err = ice_sched_init_port(hw->port_info); 3327 if (err) 3328 goto err_sched_init_port; 3329 3330 /* reset search_hint of irq_trackers to 0 since interrupts are 3331 * reclaimed and could be allocated from beginning during VSI rebuild 3332 */ 3333 pf->sw_irq_tracker->search_hint = 0; 3334 pf->hw_irq_tracker->search_hint = 0; 3335 3336 err = ice_vsi_rebuild_all(pf); 3337 if (err) { 3338 dev_err(dev, "ice_vsi_rebuild_all failed\n"); 3339 goto err_vsi_rebuild; 3340 } 3341 3342 err = ice_update_link_info(hw->port_info); 3343 if (err) 3344 dev_err(&pf->pdev->dev, "Get link status error %d\n", err); 3345 3346 /* Replay all VSIs Configuration, including filters after reset */ 3347 if (ice_vsi_replay_all(pf)) { 3348 dev_err(&pf->pdev->dev, 3349 "error replaying VSI configurations with switch filter rules\n"); 3350 goto err_vsi_rebuild; 3351 } 3352 3353 /* start misc vector */ 3354 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 3355 err = ice_req_irq_msix_misc(pf); 3356 if (err) { 3357 dev_err(dev, "misc vector setup failed: %d\n", err); 3358 goto err_vsi_rebuild; 3359 } 3360 } 3361 3362 /* restart the VSIs that were rebuilt and running before the reset */ 3363 err = ice_pf_ena_all_vsi(pf); 3364 if (err) { 3365 dev_err(&pf->pdev->dev, "error enabling VSIs\n"); 3366 /* no need to disable VSIs in tear down path in ice_rebuild() 3367 * since its already taken care in ice_vsi_open() 3368 */ 3369 goto err_vsi_rebuild; 3370 } 3371 3372 ice_reset_all_vfs(pf, true); 3373 3374 for (i = 0; i < pf->num_alloc_vsi; i++) { 3375 bool 
link_up; 3376 3377 if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF) 3378 continue; 3379 ice_get_link_status(pf->vsi[i]->port_info, &link_up); 3380 if (link_up) { 3381 netif_carrier_on(pf->vsi[i]->netdev); 3382 netif_tx_wake_all_queues(pf->vsi[i]->netdev); 3383 } else { 3384 netif_carrier_off(pf->vsi[i]->netdev); 3385 netif_tx_stop_all_queues(pf->vsi[i]->netdev); 3386 } 3387 } 3388 3389 /* if we get here, reset flow is successful */ 3390 clear_bit(__ICE_RESET_FAILED, pf->state); 3391 return; 3392 3393 err_vsi_rebuild: 3394 ice_vsi_release_all(pf); 3395 err_sched_init_port: 3396 ice_sched_cleanup_all(hw); 3397 err_init_ctrlq: 3398 ice_shutdown_all_ctrlq(hw); 3399 set_bit(__ICE_RESET_FAILED, pf->state); 3400 clear_recovery: 3401 /* set this bit in PF state to control service task scheduling */ 3402 set_bit(__ICE_NEEDS_RESTART, pf->state); 3403 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 3404 } 3405 3406 /** 3407 * ice_change_mtu - NDO callback to change the MTU 3408 * @netdev: network interface device structure 3409 * @new_mtu: new value for maximum frame size 3410 * 3411 * Returns 0 on success, negative on failure 3412 */ 3413 static int ice_change_mtu(struct net_device *netdev, int new_mtu) 3414 { 3415 struct ice_netdev_priv *np = netdev_priv(netdev); 3416 struct ice_vsi *vsi = np->vsi; 3417 struct ice_pf *pf = vsi->back; 3418 u8 count = 0; 3419 3420 if (new_mtu == netdev->mtu) { 3421 netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); 3422 return 0; 3423 } 3424 3425 if (new_mtu < netdev->min_mtu) { 3426 netdev_err(netdev, "new mtu invalid. min_mtu is %d\n", 3427 netdev->min_mtu); 3428 return -EINVAL; 3429 } else if (new_mtu > netdev->max_mtu) { 3430 netdev_err(netdev, "new mtu invalid. max_mtu is %d\n", 3431 netdev->max_mtu); 3432 return -EINVAL; 3433 } 3434 /* if a reset is in progress, wait for some time for it to complete */ 3435 do { 3436 if (ice_is_reset_in_progress(pf->state)) { 3437 count++; 3438 usleep_range(1000, 2000); 3439 } else { 3440 break; 3441 } 3442 3443 } while (count < 100); 3444 3445 if (count == 100) { 3446 netdev_err(netdev, "can't change mtu.
Device is busy\n"); 3447 return -EBUSY; 3448 } 3449 3450 netdev->mtu = new_mtu; 3451 3452 /* if VSI is up, bring it down and then back up */ 3453 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { 3454 int err; 3455 3456 err = ice_down(vsi); 3457 if (err) { 3458 netdev_err(netdev, "change mtu if_up err %d\n", err); 3459 return err; 3460 } 3461 3462 err = ice_up(vsi); 3463 if (err) { 3464 netdev_err(netdev, "change mtu if_up err %d\n", err); 3465 return err; 3466 } 3467 } 3468 3469 netdev_dbg(netdev, "changed mtu to %d\n", new_mtu); 3470 return 0; 3471 } 3472 3473 /** 3474 * ice_set_rss - Set RSS keys and lut 3475 * @vsi: Pointer to VSI structure 3476 * @seed: RSS hash seed 3477 * @lut: Lookup table 3478 * @lut_size: Lookup table size 3479 * 3480 * Returns 0 on success, negative on failure 3481 */ 3482 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 3483 { 3484 struct ice_pf *pf = vsi->back; 3485 struct ice_hw *hw = &pf->hw; 3486 enum ice_status status; 3487 3488 if (seed) { 3489 struct ice_aqc_get_set_rss_keys *buf = 3490 (struct ice_aqc_get_set_rss_keys *)seed; 3491 3492 status = ice_aq_set_rss_key(hw, vsi->idx, buf); 3493 3494 if (status) { 3495 dev_err(&pf->pdev->dev, 3496 "Cannot set RSS key, err %d aq_err %d\n", 3497 status, hw->adminq.rq_last_status); 3498 return -EIO; 3499 } 3500 } 3501 3502 if (lut) { 3503 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 3504 lut, lut_size); 3505 if (status) { 3506 dev_err(&pf->pdev->dev, 3507 "Cannot set RSS lut, err %d aq_err %d\n", 3508 status, hw->adminq.rq_last_status); 3509 return -EIO; 3510 } 3511 } 3512 3513 return 0; 3514 } 3515 3516 /** 3517 * ice_get_rss - Get RSS keys and lut 3518 * @vsi: Pointer to VSI structure 3519 * @seed: Buffer to store the keys 3520 * @lut: Buffer to store the lookup table entries 3521 * @lut_size: Size of buffer to store the lookup table entries 3522 * 3523 * Returns 0 on success, negative on failure 3524 */ 3525 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 3526 { 3527 struct ice_pf *pf = vsi->back; 3528 struct ice_hw *hw = &pf->hw; 3529 enum ice_status status; 3530 3531 if (seed) { 3532 struct ice_aqc_get_set_rss_keys *buf = 3533 (struct ice_aqc_get_set_rss_keys *)seed; 3534 3535 status = ice_aq_get_rss_key(hw, vsi->idx, buf); 3536 if (status) { 3537 dev_err(&pf->pdev->dev, 3538 "Cannot get RSS key, err %d aq_err %d\n", 3539 status, hw->adminq.rq_last_status); 3540 return -EIO; 3541 } 3542 } 3543 3544 if (lut) { 3545 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 3546 lut, lut_size); 3547 if (status) { 3548 dev_err(&pf->pdev->dev, 3549 "Cannot get RSS lut, err %d aq_err %d\n", 3550 status, hw->adminq.rq_last_status); 3551 return -EIO; 3552 } 3553 } 3554 3555 return 0; 3556 } 3557 3558 /** 3559 * ice_bridge_getlink - Get the hardware bridge mode 3560 * @skb: skb buff 3561 * @pid: process id 3562 * @seq: RTNL message seq 3563 * @dev: the netdev being configured 3564 * @filter_mask: filter mask passed in 3565 * @nlflags: netlink flags passed in 3566 * 3567 * Return the bridge mode (VEB/VEPA) 3568 */ 3569 static int 3570 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 3571 struct net_device *dev, u32 filter_mask, int nlflags) 3572 { 3573 struct ice_netdev_priv *np = netdev_priv(dev); 3574 struct ice_vsi *vsi = np->vsi; 3575 struct ice_pf *pf = vsi->back; 3576 u16 bmode; 3577 3578 bmode = pf->first_sw->bridge_mode; 3579 3580 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 3581 filter_mask, NULL); 3582 } 3583 
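/* Bridge mode background: in VEB (Virtual Ethernet Bridge) mode the device's
 * internal switch is allowed to loop traffic back between VSIs on the same
 * port (the per-VSI ICE_AQ_VSI_SW_FLAG_ALLOW_LB flag below), while in VEPA
 * (Virtual Ethernet Port Aggregator) mode that local loopback is cleared and
 * traffic is sent to the adjacent external switch instead.
 */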
3584 /** 3585 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 3586 * @vsi: Pointer to VSI structure 3587 * @bmode: Hardware bridge mode (VEB/VEPA) 3588 * 3589 * Returns 0 on success, negative on failure 3590 */ 3591 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 3592 { 3593 struct device *dev = &vsi->back->pdev->dev; 3594 struct ice_aqc_vsi_props *vsi_props; 3595 struct ice_hw *hw = &vsi->back->hw; 3596 struct ice_vsi_ctx ctxt = { 0 }; 3597 enum ice_status status; 3598 3599 vsi_props = &vsi->info; 3600 ctxt.info = vsi->info; 3601 3602 if (bmode == BRIDGE_MODE_VEB) 3603 /* change from VEPA to VEB mode */ 3604 ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 3605 else 3606 /* change from VEB to VEPA mode */ 3607 ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 3608 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 3609 3610 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 3611 if (status) { 3612 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", 3613 bmode, status, hw->adminq.sq_last_status); 3614 return -EIO; 3615 } 3616 /* Update sw flags for bookkeeping */ 3617 vsi_props->sw_flags = ctxt.info.sw_flags; 3618 3619 return 0; 3620 } 3621 3622 /** 3623 * ice_bridge_setlink - Set the hardware bridge mode 3624 * @dev: the netdev being configured 3625 * @nlh: RTNL message 3626 * @flags: bridge setlink flags 3627 * @extack: netlink extended ack 3628 * 3629 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is 3630 * attached. Iterates through the PF VSI list and sets the loopback mode (if 3631 * not already set) for all VSIs connected to this switch, and also updates the 3632 * unicast switch filter rules for the corresponding switch of the netdev.
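 *
 * User space typically requests the mode change through iproute2, e.g.
 * "bridge link set dev <ifname> hwmode vepa" (or "hwmode veb").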
3633 */ 3634 static int 3635 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 3636 u16 __always_unused flags, struct netlink_ext_ack *extack) 3637 { 3638 struct ice_netdev_priv *np = netdev_priv(dev); 3639 struct ice_pf *pf = np->vsi->back; 3640 struct nlattr *attr, *br_spec; 3641 struct ice_hw *hw = &pf->hw; 3642 enum ice_status status; 3643 struct ice_sw *pf_sw; 3644 int rem, v, err = 0; 3645 3646 pf_sw = pf->first_sw; 3647 /* find the attribute in the netlink message */ 3648 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 3649 3650 nla_for_each_nested(attr, br_spec, rem) { 3651 __u16 mode; 3652 3653 if (nla_type(attr) != IFLA_BRIDGE_MODE) 3654 continue; 3655 mode = nla_get_u16(attr); 3656 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 3657 return -EINVAL; 3658 /* Continue if bridge mode is not being flipped */ 3659 if (mode == pf_sw->bridge_mode) 3660 continue; 3661 /* Iterates through the PF VSI list and update the loopback 3662 * mode of the VSI 3663 */ 3664 ice_for_each_vsi(pf, v) { 3665 if (!pf->vsi[v]) 3666 continue; 3667 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 3668 if (err) 3669 return err; 3670 } 3671 3672 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 3673 /* Update the unicast switch filter rules for the corresponding 3674 * switch of the netdev 3675 */ 3676 status = ice_update_sw_rule_bridge_mode(hw); 3677 if (status) { 3678 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n", 3679 mode, status, hw->adminq.sq_last_status); 3680 /* revert hw->evb_veb */ 3681 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 3682 return -EIO; 3683 } 3684 3685 pf_sw->bridge_mode = mode; 3686 } 3687 3688 return 0; 3689 } 3690 3691 /** 3692 * ice_tx_timeout - Respond to a Tx Hang 3693 * @netdev: network interface device structure 3694 */ 3695 static void ice_tx_timeout(struct net_device *netdev) 3696 { 3697 struct ice_netdev_priv *np = netdev_priv(netdev); 3698 struct ice_ring *tx_ring = NULL; 3699 struct ice_vsi *vsi = np->vsi; 3700 struct ice_pf *pf = vsi->back; 3701 int hung_queue = -1; 3702 u32 i; 3703 3704 pf->tx_timeout_count++; 3705 3706 /* find the stopped queue the same way dev_watchdog() does */ 3707 for (i = 0; i < netdev->num_tx_queues; i++) { 3708 unsigned long trans_start; 3709 struct netdev_queue *q; 3710 3711 q = netdev_get_tx_queue(netdev, i); 3712 trans_start = q->trans_start; 3713 if (netif_xmit_stopped(q) && 3714 time_after(jiffies, 3715 trans_start + netdev->watchdog_timeo)) { 3716 hung_queue = i; 3717 break; 3718 } 3719 } 3720 3721 if (i == netdev->num_tx_queues) 3722 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); 3723 else 3724 /* now that we have an index, find the tx_ring struct */ 3725 for (i = 0; i < vsi->num_txq; i++) 3726 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 3727 if (hung_queue == vsi->tx_rings[i]->q_index) { 3728 tx_ring = vsi->tx_rings[i]; 3729 break; 3730 } 3731 3732 /* Reset recovery level if enough time has elapsed after last timeout. 3733 * Also ensure no new reset action happens before next timeout period. 
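 *
 * Recovery escalates with repeated timeouts: level 1 requests a PF reset,
 * level 2 a CORE reset, level 3 a GLOBAL reset; beyond that the device is
 * left down as unrecoverable.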
3734 */ 3735 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 3736 pf->tx_timeout_recovery_level = 1; 3737 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 3738 netdev->watchdog_timeo))) 3739 return; 3740 3741 if (tx_ring) { 3742 struct ice_hw *hw = &pf->hw; 3743 u32 head, val = 0; 3744 3745 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & 3746 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; 3747 /* Read interrupt register */ 3748 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3749 val = rd32(hw, 3750 GLINT_DYN_CTL(tx_ring->q_vector->v_idx + 3751 tx_ring->vsi->hw_base_vector)); 3752 3753 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 3754 vsi->vsi_num, hung_queue, tx_ring->next_to_clean, 3755 head, tx_ring->next_to_use, val); 3756 } 3757 3758 pf->tx_timeout_last_recovery = jiffies; 3759 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", 3760 pf->tx_timeout_recovery_level, hung_queue); 3761 3762 switch (pf->tx_timeout_recovery_level) { 3763 case 1: 3764 set_bit(__ICE_PFR_REQ, pf->state); 3765 break; 3766 case 2: 3767 set_bit(__ICE_CORER_REQ, pf->state); 3768 break; 3769 case 3: 3770 set_bit(__ICE_GLOBR_REQ, pf->state); 3771 break; 3772 default: 3773 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 3774 set_bit(__ICE_DOWN, pf->state); 3775 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3776 set_bit(__ICE_SERVICE_DIS, pf->state); 3777 break; 3778 } 3779 3780 ice_service_task_schedule(pf); 3781 pf->tx_timeout_recovery_level++; 3782 } 3783 3784 /** 3785 * ice_open - Called when a network interface becomes active 3786 * @netdev: network interface device structure 3787 * 3788 * The open entry point is called when a network interface is made 3789 * active by the system (IFF_UP). At this point all resources needed 3790 * for transmit and receive operations are allocated, the interrupt 3791 * handler is registered with the OS, the netdev watchdog is enabled, 3792 * and the stack is notified that the interface is ready. 3793 * 3794 * Returns 0 on success, negative value on failure 3795 */ 3796 static int ice_open(struct net_device *netdev) 3797 { 3798 struct ice_netdev_priv *np = netdev_priv(netdev); 3799 struct ice_vsi *vsi = np->vsi; 3800 int err; 3801 3802 if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { 3803 netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 3804 return -EIO; 3805 } 3806 3807 netif_carrier_off(netdev); 3808 3809 err = ice_vsi_open(vsi); 3810 3811 if (err) 3812 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 3813 vsi->vsi_num, vsi->vsw->sw_id); 3814 return err; 3815 } 3816 3817 /** 3818 * ice_stop - Disables a network interface 3819 * @netdev: network interface device structure 3820 * 3821 * The stop entry point is called when an interface is de-activated by the OS, 3822 * and the netdevice enters the DOWN state. The hardware is still under the 3823 * driver's control, but the netdev interface is disabled. 
3824 * 3825 * Returns success only - not allowed to fail 3826 */ 3827 static int ice_stop(struct net_device *netdev) 3828 { 3829 struct ice_netdev_priv *np = netdev_priv(netdev); 3830 struct ice_vsi *vsi = np->vsi; 3831 3832 ice_vsi_close(vsi); 3833 3834 return 0; 3835 } 3836 3837 /** 3838 * ice_features_check - Validate encapsulated packet conforms to limits 3839 * @skb: skb buffer 3840 * @netdev: This port's netdev 3841 * @features: Offload features that the stack believes apply 3842 */ 3843 static netdev_features_t 3844 ice_features_check(struct sk_buff *skb, 3845 struct net_device __always_unused *netdev, 3846 netdev_features_t features) 3847 { 3848 size_t len; 3849 3850 /* No point in doing any of this if neither checksum nor GSO are 3851 * being requested for this frame. We can rule out both by just 3852 * checking for CHECKSUM_PARTIAL 3853 */ 3854 if (skb->ip_summed != CHECKSUM_PARTIAL) 3855 return features; 3856 3857 /* We cannot support GSO if the MSS is going to be less than 3858 * 64 bytes. If it is then we need to drop support for GSO. 3859 */ 3860 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3861 features &= ~NETIF_F_GSO_MASK; 3862 3863 len = skb_network_header(skb) - skb->data; 3864 if (len & ~(ICE_TXD_MACLEN_MAX)) 3865 goto out_rm_features; 3866 3867 len = skb_transport_header(skb) - skb_network_header(skb); 3868 if (len & ~(ICE_TXD_IPLEN_MAX)) 3869 goto out_rm_features; 3870 3871 if (skb->encapsulation) { 3872 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3873 if (len & ~(ICE_TXD_L4LEN_MAX)) 3874 goto out_rm_features; 3875 3876 len = skb_inner_transport_header(skb) - 3877 skb_inner_network_header(skb); 3878 if (len & ~(ICE_TXD_IPLEN_MAX)) 3879 goto out_rm_features; 3880 } 3881 3882 return features; 3883 out_rm_features: 3884 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3885 } 3886 3887 static const struct net_device_ops ice_netdev_ops = { 3888 .ndo_open = ice_open, 3889 .ndo_stop = ice_stop, 3890 .ndo_start_xmit = ice_start_xmit, 3891 .ndo_features_check = ice_features_check, 3892 .ndo_set_rx_mode = ice_set_rx_mode, 3893 .ndo_set_mac_address = ice_set_mac_address, 3894 .ndo_validate_addr = eth_validate_addr, 3895 .ndo_change_mtu = ice_change_mtu, 3896 .ndo_get_stats64 = ice_get_stats64, 3897 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 3898 .ndo_set_vf_mac = ice_set_vf_mac, 3899 .ndo_get_vf_config = ice_get_vf_cfg, 3900 .ndo_set_vf_trust = ice_set_vf_trust, 3901 .ndo_set_vf_vlan = ice_set_vf_port_vlan, 3902 .ndo_set_vf_link_state = ice_set_vf_link_state, 3903 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 3904 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 3905 .ndo_set_features = ice_set_features, 3906 .ndo_bridge_getlink = ice_bridge_getlink, 3907 .ndo_bridge_setlink = ice_bridge_setlink, 3908 .ndo_fdb_add = ice_fdb_add, 3909 .ndo_fdb_del = ice_fdb_del, 3910 .ndo_tx_timeout = ice_tx_timeout, 3911 }; 3912