// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"
#include "ice_lib.h"

#define DRV_VERSION	"0.7.2-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);

static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u32 ice_get_tx_pending(struct ice_ring *ring)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	unsigned int i;
	u32 v, v_idx;
	int packets;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			int itr = ICE_ITR_NONE;

			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				v_idx = tx_ring->q_vector->v_idx;
				wr32(&vsi->back->hw,
				     GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
				     (itr << GLINT_DYN_CTL_ITR_INDX_S) |
				     GLINT_DYN_CTL_SWINT_TRIG_M |
				     GLINT_DYN_CTL_INTENA_MSK_M);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: mac address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * mac filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: mac address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the mac filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove mac addresses in the unsync list */
	status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
	ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add mac addresses in the sync list */
	status = ice_add_mac(hw, &vsi->tmp_sync_list);
	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
	if (status) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, vsi
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev,
				    "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI)
		netdev_warn(netdev, "Unsupported configuration\n");

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply TX filter rule to get traffic from VMs */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
						  ICE_FLTR_TX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i tx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
			/* Apply RX filter rule to get traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		} else {
			/* Clear TX filter rule to stop traffic from VMs */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
						  ICE_FLTR_TX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
			/* Clear RX filter to remove traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	for (v = 0; v < pf->num_alloc_vsi; v++)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	WARN_ON(in_interrupt());

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		clear_bit(__ICE_CORER_REQ, pf->state);
		clear_bit(__ICE_GLOBR_REQ, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
		clear_bit(__ICE_GLOBR_RECV, pf->state);
		clear_bit(__ICE_CORER_RECV, pf->state);
		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
			ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	const char *speed;
	const char *fc;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "RX/TX";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "TX";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "Unknown";
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
		    speed, fc);
}

/**
 * ice_vsi_link_event - update the vsi's netdev
 * @vsi: the vsi on which the link event occurred
 * @link_up: whether the link is now up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (!vsi->netdev) {
			dev_dbg(&vsi->back->pdev->dev,
				"vsi->netdev is not initialized!\n");
			return;
		}
		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_link_event - process the link event
 * @pf: pf that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 *
 * Returns -EIO if ice_get_link_status() fails
 * Returns 0 on success
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
{
	u8 new_link_speed, old_link_speed;
	struct ice_phy_info *phy_info;
	bool new_link_same_as_old;
	bool new_link, old_link;
	u8 lport;
	u16 v;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;
	/* Force ice_get_link_status() to update link info */
	phy_info->get_link_info = true;

	old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	lport = pi->lport;
	if (ice_get_link_status(pi, &new_link)) {
		dev_dbg(&pf->pdev->dev,
			"Could not get link status for port %d\n", lport);
		return -EIO;
	}

	new_link_speed = phy_info->link_info.link_speed;

	new_link_same_as_old = (new_link == old_link &&
				new_link_speed == old_link_speed);

	ice_for_each_vsi(pf, v) {
		struct ice_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->port_info)
			continue;

		if (new_link_same_as_old &&
		    (test_bit(__ICE_DOWN, vsi->state) ||
		    new_link == netif_carrier_ok(vsi->netdev)))
			continue;

		if (vsi->port_info->lport == lport) {
			ice_print_link_msg(vsi, new_link);
			ice_vsi_link_event(vsi, new_link);
		}
	}

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	if (ice_link_event(pf, pf->hw.port_info))
		dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
			 q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue VF Error detected\n", qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue VF Error detected\n", qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
				     GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(&pf->pdev->dev,
				"%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		switch (opcode) {
		case ice_mbx_opc_send_msg_to_pf:
			ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		default:
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	devm_kfree(&pf->pdev->dev, event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
static void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 */
static void ice_service_task_stop(struct ice_pf *pf)
{
	set_bit(__ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool mdd_detected = false;
	u32 reg;
	int i;

	if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		bool pf_mdd_detected = false;

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__ICE_NEEDS_RESTART, pf->state);
			ice_service_task_schedule(pf);
		}
	}

	/* see if one of the VFs needs to be reset */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
		}
	}

	/* re-enable MDD interrupt cause */
	clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, PFINT_OICR_ENA);
	reg |= PFINT_OICR_MAL_DETECT_M;
	wr32(hw, PFINT_OICR_ENA, reg);
	ice_flush(hw);
}

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_process_vflr_event(pf);
	ice_watchdog_subtask(pf);
	ice_clean_adminq_subtask(pf);
	ice_clean_mailboxq_subtask(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}

/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the hw instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		for (i = 0; i < vsi->num_q_vectors; i++)
			ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
	}

	ice_flush(hw);
	return 0;
}

/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->sw_base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(&pf->pdev->dev,
				       pf->msix_entries[base + vector].vector,
				       vsi->irq_handler, 0, q_vector->name,
				       q_vector);
		if (err) {
			netdev_err(vsi->netdev,
				   "MSIX request_irq failed, error: %d\n", err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = ice_irq_affinity_notify;
		q_vector->affinity_notify.release = ice_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		ena_mask &= ~PFINT_OICR_VFLR_M;
		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
				reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report and mask off any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PE_CRITERR_M |
			    PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
		ena_mask &= ~oicr;
	}
	ret = IRQ_HANDLED;

	/* re-enable interrupt causes that are not handled during this pass */
	wr32(hw, PFINT_OICR_ENA, ena_mask);
	if (!test_bit(__ICE_DOWN, pf->state)) {
		ice_service_task_schedule(pf);
		ice_irq_dynamic_ena(hw, NULL, NULL);
	}

	return ret;
}

/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	/* disable OICR interrupt */
	wr32(&pf->hw, PFINT_OICR_ENA, 0);
	ice_flush(&pf->hw);

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
		devm_free_irq(&pf->pdev->dev,
			      pf->msix_entries[pf->sw_oicr_idx].vector, pf);
	}

	pf->num_avail_sw_msix += 1;
	ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
	pf->num_avail_hw_msix += 1;
	ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
}

/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 */
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int oicr_idx, err = 0;
	u8 itr_gran;
	u32 val;

	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));

	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
	if (ice_is_reset_in_progress(pf->state))
		goto skip_req_irq;

	/* reserve one vector in sw_irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0)
		return oicr_idx;

	pf->num_avail_sw_msix -= 1;
	pf->sw_oicr_idx = oicr_idx;

	/* reserve one vector in hw_irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0) {
		ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		return oicr_idx;
	}
	pf->num_avail_hw_msix -= 1;
	pf->hw_oicr_idx = oicr_idx;

	err = devm_request_irq(&pf->pdev->dev,
			       pf->msix_entries[pf->sw_oicr_idx].vector,
			       ice_misc_intr, 0, pf->int_name, pf);
	if (err) {
		dev_err(&pf->pdev->dev,
			"devm_request_irq for %s failed: %d\n",
			pf->int_name, err);
		ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_hw_msix += 1;
		return err;
	}

skip_req_irq:
	ice_ena_misc_vector(pf);

	val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
	       PFINT_OICR_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_OICR_CTL, val);

	/* This enables Admin queue Interrupt causes */
	val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
	       PFINT_FW_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_FW_CTL, val);

	/* This enables Mailbox queue Interrupt causes */
	val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
	       PFINT_MBX_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_MBX_CTL, val);

	itr_gran = hw->itr_gran;

	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
	     ITR_TO_REG(ICE_ITR_8K, itr_gran));

	ice_flush(hw);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return 0;
}

/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */
void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}

/**
 * ice_napi_add - register NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be registered
 *
 * This function is only called in the driver's load path. Registering the NAPI
 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
 * reset/rebuild, etc.)
 */
static void ice_napi_add(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll, NAPI_POLL_WEIGHT);
}

/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
				    vsi->alloc_txq, vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA |
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM |
			 NETIF_F_IP_CSUM |
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;

	tso_features = NETIF_F_TSO;

	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* enable features */
	netdev->features |= netdev->hw_features;
	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);

		ether_addr_copy(netdev->dev_addr, mac_addr);
		ether_addr_copy(netdev->perm_addr, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* assign netdev_ops */
	netdev->netdev_ops = &ice_netdev_ops;

	/* setup watchdog timeout value to be 5 seconds */
	netdev->watchdog_timeo = 5 * HZ;

	ice_set_ethtool_ops(netdev);

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = ICE_MAX_MTU;

	err = register_netdev(vsi->netdev);
	if (err)
		return err;

	netif_carrier_off(vsi->netdev);

	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(vsi->netdev);

	return 0;
}

/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI sw struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
}

/**
 * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 */
static int ice_vlan_rx_add_vid(struct net_device *netdev,
			       __always_unused __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (vid >= VLAN_N_VID) {
		netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
			   vid, VLAN_N_VID);
		return -EINVAL;
	}

	if (vsi->info.pvid)
		return -EINVAL;

	/* Enable VLAN pruning when VLAN 0 is added */
	if (unlikely(!vid)) {
		int ret = ice_cfg_vlan_pruning(vsi, true);

		if (ret)
			return ret;
	}

	/* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
	 * needed to continue allowing all untagged packets since VLAN prune
	 * list is applied to all packets by the switch
	 */
	return ice_vsi_add_vlan(vsi, vid);
}

/**
 * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 */
static int ice_vlan_rx_kill_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int status;

	if (vsi->info.pvid)
		return -EINVAL;

	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
	 * information
	 */
	status = ice_vsi_kill_vlan(vsi, vid);
	if (status)
		return status;

	/* Disable VLAN pruning when VLAN 0 is removed */
	if (unlikely(!vid))
		status = ice_cfg_vlan_pruning(vsi, false);

	return status;
}

/**
 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_setup_pf_sw(struct ice_pf *pf)
{
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	int status = 0;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi) {
		status = -ENOMEM;
		goto unroll_vsi_setup;
	}

	status = ice_cfg_netdev(vsi);
	if (status) {
		status = -ENODEV;
		goto unroll_vsi_setup;
	}

	/* registering the NAPI handler requires both the queues and
	 * netdev to be created, which are done in ice_pf_vsi_setup()
	 * and ice_cfg_netdev() respectively
	 */
	ice_napi_add(vsi);

	/* To add a MAC filter, first add the MAC to a list and then
	 * pass the list to ice_add_mac.
	 */

	/* Add a unicast MAC filter so the VSI can get its packets */
	status = ice_add_mac_to_list(vsi, &tmp_add_list,
				     vsi->port_info->mac.perm_addr);
	if (status)
		goto unroll_napi_add;

	/* VSI needs to receive broadcast traffic, so add the broadcast
	 * MAC address to the list as well.
	 */
	eth_broadcast_addr(broadcast);
	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto free_mac_list;

	/* program MAC filters for entries in tmp_add_list */
	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status) {
		dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
		status = -ENOMEM;
		goto free_mac_list;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;

free_mac_list:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);

unroll_napi_add:
	if (vsi) {
		ice_napi_del(vsi);
		if (vsi->netdev) {
			if (vsi->netdev->reg_state == NETREG_REGISTERED)
				unregister_netdev(vsi->netdev);
			free_netdev(vsi->netdev);
			vsi->netdev = NULL;
		}
	}

unroll_vsi_setup:
	if (vsi) {
		ice_vsi_free_q_vectors(vsi);
		ice_vsi_delete(vsi);
		ice_vsi_put_qs(vsi);
		pf->q_left_tx += vsi->alloc_txq;
		pf->q_left_rx += vsi->alloc_rxq;
		ice_vsi_clear(vsi);
	}
	return status;
}

/**
 * ice_determine_q_usage - Calculate queue distribution
 * @pf: board private structure
 */
static void ice_determine_q_usage(struct ice_pf *pf)
{
	u16 q_left_tx, q_left_rx;

	q_left_tx = pf->hw.func_caps.common_cap.num_txq;
	q_left_rx = pf->hw.func_caps.common_cap.num_rxq;

	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());

	/* only 1 Rx queue unless RSS is enabled */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		pf->num_lan_rx = 1;
	else
		pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());

	pf->q_left_tx = q_left_tx - pf->num_lan_tx;
	pf->q_left_rx = q_left_rx - pf->num_lan_rx;
}

/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to de-initialize
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	ice_service_task_stop(pf);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->avail_q_mutex);
}

/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static void ice_init_pf(struct ice_pf *pf)
{
	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
	set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
		struct ice_hw *hw = &pf->hw;

		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
					      ICE_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->avail_q_mutex);

	/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
	mutex_lock(&pf->avail_q_mutex);
	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
	mutex_unlock(&pf->avail_q_mutex);

	if (pf->hw.func_caps.common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
 * @pf: board private structure
 *
 * compute the number of MSIX vectors required (v_budget) and request from
 * the OS. Return the number of vectors reserved or negative on failure
 */
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int v_left, v_actual, v_budget = 0;
	int needed, err, i;

	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;

	/* reserve one vector for miscellaneous handler */
	needed = 1;
	v_budget += needed;
	v_left -= needed;

	/* reserve vectors for LAN traffic */
	pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
	v_budget += pf->num_lan_msix;
	v_left -= pf->num_lan_msix;

	pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);

	if (!pf->msix_entries) {
		err = -ENOMEM;
		goto exit_err;
	}

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;

	/* actually reserve the vectors */
	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					 ICE_MIN_MSIX, v_budget);

	if (v_actual < 0) {
		dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto msix_err;
	}

	if (v_actual < v_budget) {
		dev_warn(&pf->pdev->dev,
			 "not enough vectors. requested = %d, obtained = %d\n",
			 v_budget, v_actual);
		if (v_actual >= (pf->num_lan_msix + 1)) {
			pf->num_avail_sw_msix = v_actual -
						(pf->num_lan_msix + 1);
		} else if (v_actual >= 2) {
			pf->num_lan_msix = 1;
			pf->num_avail_sw_msix = v_actual - 2;
		} else {
			pci_disable_msix(pf->pdev);
			err = -ERANGE;
			goto msix_err;
		}
	}

	return v_actual;

msix_err:
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	goto exit_err;

exit_err:
	pf->num_lan_msix = 0;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
	return err;
}

/**
 * ice_dis_msix - Disable MSI-X interrupt setup in OS
 * @pf: board private structure
 */
static void ice_dis_msix(struct ice_pf *pf)
{
	pci_disable_msix(pf->pdev);
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	pf->msix_entries = NULL;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}

/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_dis_msix(pf);

	if (pf->sw_irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
		pf->sw_irq_tracker = NULL;
	}

	if (pf->hw_irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
		pf->hw_irq_tracker = NULL;
	}
}

/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int vectors = 0, hw_vectors = 0;
	ssize_t size;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		vectors = ice_ena_msix_range(pf);
	else
		return -ENODEV;

	if (vectors < 0)
		return vectors;

	/* set up vector assignment tracking */
	size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);

	pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
	if (!pf->sw_irq_tracker) {
		ice_dis_msix(pf);
		return -ENOMEM;
	}

	/* populate SW interrupts pool with number of OS granted IRQs. */
populate SW interrupts pool with number of OS granted IRQs. */ 1980 pf->num_avail_sw_msix = vectors; 1981 pf->sw_irq_tracker->num_entries = vectors; 1982 1983 /* set up HW vector assignment tracking */ 1984 hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 1985 size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors); 1986 1987 pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); 1988 if (!pf->hw_irq_tracker) { 1989 ice_clear_interrupt_scheme(pf); 1990 return -ENOMEM; 1991 } 1992 1993 /* populate HW interrupts pool with number of HW supported irqs. */ 1994 pf->num_avail_hw_msix = hw_vectors; 1995 pf->hw_irq_tracker->num_entries = hw_vectors; 1996 1997 return 0; 1998 } 1999 2000 /** 2001 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 2002 * @pf: pointer to the PF structure 2003 * 2004 * There is no error returned here because the driver should be able to handle 2005 * 128 Byte cache lines, so we only print a warning in case issues are seen, 2006 * specifically with Tx. 2007 */ 2008 static void ice_verify_cacheline_size(struct ice_pf *pf) 2009 { 2010 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 2011 dev_warn(&pf->pdev->dev, 2012 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 2013 ICE_CACHE_LINE_BYTES); 2014 } 2015 2016 /** 2017 * ice_probe - Device initialization routine 2018 * @pdev: PCI device information struct 2019 * @ent: entry in ice_pci_tbl 2020 * 2021 * Returns 0 on success, negative on failure 2022 */ 2023 static int ice_probe(struct pci_dev *pdev, 2024 const struct pci_device_id __always_unused *ent) 2025 { 2026 struct ice_pf *pf; 2027 struct ice_hw *hw; 2028 int err; 2029 2030 /* this driver uses devres, see Documentation/driver-model/devres.txt */ 2031 err = pcim_enable_device(pdev); 2032 if (err) 2033 return err; 2034 2035 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); 2036 if (err) { 2037 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); 2038 return err; 2039 } 2040 2041 pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL); 2042 if (!pf) 2043 return -ENOMEM; 2044 2045 /* set up for high or low dma */ 2046 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2047 if (err) 2048 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2049 if (err) { 2050 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); 2051 return err; 2052 } 2053 2054 pci_enable_pcie_error_reporting(pdev); 2055 pci_set_master(pdev); 2056 2057 pf->pdev = pdev; 2058 pci_set_drvdata(pdev, pf); 2059 set_bit(__ICE_DOWN, pf->state); 2060 /* Disable service task until DOWN bit is cleared */ 2061 set_bit(__ICE_SERVICE_DIS, pf->state); 2062 2063 hw = &pf->hw; 2064 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 2065 hw->back = pf; 2066 hw->vendor_id = pdev->vendor; 2067 hw->device_id = pdev->device; 2068 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 2069 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2070 hw->subsystem_device_id = pdev->subsystem_device; 2071 hw->bus.device = PCI_SLOT(pdev->devfn); 2072 hw->bus.func = PCI_FUNC(pdev->devfn); 2073 ice_set_ctrlq_len(hw); 2074 2075 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 2076 2077 #ifndef CONFIG_DYNAMIC_DEBUG 2078 if (debug < -1) 2079 hw->debug_mask = debug; 2080 #endif 2081 2082 err = ice_init_hw(hw); 2083 if (err) { 2084 dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); 2085 err = -EIO; 2086 goto err_exit_unroll; 2087 } 2088 2089 dev_info(&pdev->dev, "firmware %d.%d.%05d 
api %d.%d\n", 2090 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, 2091 hw->api_maj_ver, hw->api_min_ver); 2092 2093 ice_init_pf(pf); 2094 2095 ice_determine_q_usage(pf); 2096 2097 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 2098 if (!pf->num_alloc_vsi) { 2099 err = -EIO; 2100 goto err_init_pf_unroll; 2101 } 2102 2103 pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, 2104 sizeof(struct ice_vsi *), GFP_KERNEL); 2105 if (!pf->vsi) { 2106 err = -ENOMEM; 2107 goto err_init_pf_unroll; 2108 } 2109 2110 err = ice_init_interrupt_scheme(pf); 2111 if (err) { 2112 dev_err(&pdev->dev, 2113 "ice_init_interrupt_scheme failed: %d\n", err); 2114 err = -EIO; 2115 goto err_init_interrupt_unroll; 2116 } 2117 2118 /* Driver is mostly up */ 2119 clear_bit(__ICE_DOWN, pf->state); 2120 2121 /* In case of MSIX we are going to setup the misc vector right here 2122 * to handle admin queue events etc. In case of legacy and MSI 2123 * the misc functionality and queue processing is combined in 2124 * the same vector and that gets setup at open. 2125 */ 2126 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 2127 err = ice_req_irq_msix_misc(pf); 2128 if (err) { 2129 dev_err(&pdev->dev, 2130 "setup of misc vector failed: %d\n", err); 2131 goto err_init_interrupt_unroll; 2132 } 2133 } 2134 2135 /* create switch struct for the switch element created by FW on boot */ 2136 pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), 2137 GFP_KERNEL); 2138 if (!pf->first_sw) { 2139 err = -ENOMEM; 2140 goto err_msix_misc_unroll; 2141 } 2142 2143 if (hw->evb_veb) 2144 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 2145 else 2146 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 2147 2148 pf->first_sw->pf = pf; 2149 2150 /* record the sw_id available for later use */ 2151 pf->first_sw->sw_id = hw->port_info->sw_id; 2152 2153 err = ice_setup_pf_sw(pf); 2154 if (err) { 2155 dev_err(&pdev->dev, 2156 "probe failed due to setup pf switch:%d\n", err); 2157 goto err_alloc_sw_unroll; 2158 } 2159 2160 clear_bit(__ICE_SERVICE_DIS, pf->state); 2161 2162 /* since everything is good, start the service timer */ 2163 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 2164 2165 ice_verify_cacheline_size(pf); 2166 2167 return 0; 2168 2169 err_alloc_sw_unroll: 2170 set_bit(__ICE_SERVICE_DIS, pf->state); 2171 set_bit(__ICE_DOWN, pf->state); 2172 devm_kfree(&pf->pdev->dev, pf->first_sw); 2173 err_msix_misc_unroll: 2174 ice_free_irq_msix_misc(pf); 2175 err_init_interrupt_unroll: 2176 ice_clear_interrupt_scheme(pf); 2177 devm_kfree(&pdev->dev, pf->vsi); 2178 err_init_pf_unroll: 2179 ice_deinit_pf(pf); 2180 ice_deinit_hw(hw); 2181 err_exit_unroll: 2182 pci_disable_pcie_error_reporting(pdev); 2183 return err; 2184 } 2185 2186 /** 2187 * ice_remove - Device removal routine 2188 * @pdev: PCI device information struct 2189 */ 2190 static void ice_remove(struct pci_dev *pdev) 2191 { 2192 struct ice_pf *pf = pci_get_drvdata(pdev); 2193 int i; 2194 2195 if (!pf) 2196 return; 2197 2198 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 2199 if (!ice_is_reset_in_progress(pf->state)) 2200 break; 2201 msleep(100); 2202 } 2203 2204 set_bit(__ICE_DOWN, pf->state); 2205 ice_service_task_stop(pf); 2206 2207 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) 2208 ice_free_vfs(pf); 2209 ice_vsi_release_all(pf); 2210 ice_free_irq_msix_misc(pf); 2211 ice_for_each_vsi(pf, i) { 2212 if (!pf->vsi[i]) 2213 continue; 2214 ice_vsi_free_q_vectors(pf->vsi[i]); 2215 } 2216 ice_clear_interrupt_scheme(pf); 2217 ice_deinit_pf(pf); 2218 ice_deinit_hw(&pf->hw); 2219 
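/* Tear-down above mirrors ice_probe() in reverse; PCIe advanced error
 * reporting was enabled during probe and is turned off last, once the
 * hardware has been fully de-initialized.
 */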
pci_disable_pcie_error_reporting(pdev); 2220 } 2221 2222 /* ice_pci_tbl - PCI Device ID Table 2223 * 2224 * Wildcard entries (PCI_ANY_ID) should come last 2225 * Last entry must be all 0s 2226 * 2227 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 2228 * Class, Class Mask, private data (not used) } 2229 */ 2230 static const struct pci_device_id ice_pci_tbl[] = { 2231 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 2232 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 2233 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 2234 /* required last entry */ 2235 { 0, } 2236 }; 2237 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 2238 2239 static struct pci_driver ice_driver = { 2240 .name = KBUILD_MODNAME, 2241 .id_table = ice_pci_tbl, 2242 .probe = ice_probe, 2243 .remove = ice_remove, 2244 .sriov_configure = ice_sriov_configure, 2245 }; 2246 2247 /** 2248 * ice_module_init - Driver registration routine 2249 * 2250 * ice_module_init is the first routine called when the driver is 2251 * loaded. All it does is register with the PCI subsystem. 2252 */ 2253 static int __init ice_module_init(void) 2254 { 2255 int status; 2256 2257 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); 2258 pr_info("%s\n", ice_copyright); 2259 2260 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 2261 if (!ice_wq) { 2262 pr_err("Failed to create workqueue\n"); 2263 return -ENOMEM; 2264 } 2265 2266 status = pci_register_driver(&ice_driver); 2267 if (status) { 2268 pr_err("failed to register pci driver, err %d\n", status); 2269 destroy_workqueue(ice_wq); 2270 } 2271 2272 return status; 2273 } 2274 module_init(ice_module_init); 2275 2276 /** 2277 * ice_module_exit - Driver exit cleanup routine 2278 * 2279 * ice_module_exit is called just before the driver is removed 2280 * from memory. 2281 */ 2282 static void __exit ice_module_exit(void) 2283 { 2284 pci_unregister_driver(&ice_driver); 2285 destroy_workqueue(ice_wq); 2286 pr_info("module unloaded\n"); 2287 } 2288 module_exit(ice_module_exit); 2289 2290 /** 2291 * ice_set_mac_address - NDO callback to set mac address 2292 * @netdev: network interface device structure 2293 * @pi: pointer to an address structure 2294 * 2295 * Returns 0 on success, negative on failure 2296 */ 2297 static int ice_set_mac_address(struct net_device *netdev, void *pi) 2298 { 2299 struct ice_netdev_priv *np = netdev_priv(netdev); 2300 struct ice_vsi *vsi = np->vsi; 2301 struct ice_pf *pf = vsi->back; 2302 struct ice_hw *hw = &pf->hw; 2303 struct sockaddr *addr = pi; 2304 enum ice_status status; 2305 LIST_HEAD(a_mac_list); 2306 LIST_HEAD(r_mac_list); 2307 u8 flags = 0; 2308 int err; 2309 u8 *mac; 2310 2311 mac = (u8 *)addr->sa_data; 2312 2313 if (!is_valid_ether_addr(mac)) 2314 return -EADDRNOTAVAIL; 2315 2316 if (ether_addr_equal(netdev->dev_addr, mac)) { 2317 netdev_warn(netdev, "already using mac %pM\n", mac); 2318 return 0; 2319 } 2320 2321 if (test_bit(__ICE_DOWN, pf->state) || 2322 ice_is_reset_in_progress(pf->state)) { 2323 netdev_err(netdev, "can't set mac %pM. device not ready\n", 2324 mac); 2325 return -EBUSY; 2326 } 2327 2328 /* When we change the mac address we also have to change the mac address 2329 * based filter rules that were created previously for the old mac 2330 * address. So first, we remove the old filter rule using ice_remove_mac 2331 * and then create a new filter rule using ice_add_mac. 
Note that for 2332 * both these operations, we first need to form a "list" of mac 2333 * addresses (even though in this case, we have only 1 mac address to be 2334 * added/removed) and this done using ice_add_mac_to_list. Depending on 2335 * the ensuing operation this "list" of mac addresses is either to be 2336 * added or removed from the filter. 2337 */ 2338 err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); 2339 if (err) { 2340 err = -EADDRNOTAVAIL; 2341 goto free_lists; 2342 } 2343 2344 status = ice_remove_mac(hw, &r_mac_list); 2345 if (status) { 2346 err = -EADDRNOTAVAIL; 2347 goto free_lists; 2348 } 2349 2350 err = ice_add_mac_to_list(vsi, &a_mac_list, mac); 2351 if (err) { 2352 err = -EADDRNOTAVAIL; 2353 goto free_lists; 2354 } 2355 2356 status = ice_add_mac(hw, &a_mac_list); 2357 if (status) { 2358 err = -EADDRNOTAVAIL; 2359 goto free_lists; 2360 } 2361 2362 free_lists: 2363 /* free list entries */ 2364 ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); 2365 ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); 2366 2367 if (err) { 2368 netdev_err(netdev, "can't set mac %pM. filter update failed\n", 2369 mac); 2370 return err; 2371 } 2372 2373 /* change the netdev's mac address */ 2374 memcpy(netdev->dev_addr, mac, netdev->addr_len); 2375 netdev_dbg(vsi->netdev, "updated mac address to %pM\n", 2376 netdev->dev_addr); 2377 2378 /* write new mac address to the firmware */ 2379 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 2380 status = ice_aq_manage_mac_write(hw, mac, flags, NULL); 2381 if (status) { 2382 netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n", 2383 mac); 2384 } 2385 return 0; 2386 } 2387 2388 /** 2389 * ice_set_rx_mode - NDO callback to set the netdev filters 2390 * @netdev: network interface device structure 2391 */ 2392 static void ice_set_rx_mode(struct net_device *netdev) 2393 { 2394 struct ice_netdev_priv *np = netdev_priv(netdev); 2395 struct ice_vsi *vsi = np->vsi; 2396 2397 if (!vsi) 2398 return; 2399 2400 /* Set the flags to synchronize filters 2401 * ndo_set_rx_mode may be triggered even without a change in netdev 2402 * flags 2403 */ 2404 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 2405 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 2406 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 2407 2408 /* schedule our worker thread which will take care of 2409 * applying the new filter changes 2410 */ 2411 ice_service_task_schedule(vsi->back); 2412 } 2413 2414 /** 2415 * ice_fdb_add - add an entry to the hardware database 2416 * @ndm: the input from the stack 2417 * @tb: pointer to array of nladdr (unused) 2418 * @dev: the net device pointer 2419 * @addr: the MAC address entry being added 2420 * @vid: VLAN id 2421 * @flags: instructions from stack about fdb operation 2422 */ 2423 static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 2424 struct net_device *dev, const unsigned char *addr, 2425 u16 vid, u16 flags) 2426 { 2427 int err; 2428 2429 if (vid) { 2430 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 2431 return -EINVAL; 2432 } 2433 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 2434 netdev_err(dev, "FDB only supports static addresses\n"); 2435 return -EINVAL; 2436 } 2437 2438 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 2439 err = dev_uc_add_excl(dev, addr); 2440 else if (is_multicast_ether_addr(addr)) 2441 err = dev_mc_add_excl(dev, addr); 2442 else 2443 err = -EINVAL; 2444 2445 /* Only return duplicate errors if NLM_F_EXCL is set */ 2446 if (err == 
-EEXIST && !(flags & NLM_F_EXCL)) 2447 err = 0; 2448 2449 return err; 2450 } 2451 2452 /** 2453 * ice_fdb_del - delete an entry from the hardware database 2454 * @ndm: the input from the stack 2455 * @tb: pointer to array of nladdr (unused) 2456 * @dev: the net device pointer 2457 * @addr: the MAC address entry being added 2458 * @vid: VLAN id 2459 */ 2460 static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 2461 struct net_device *dev, const unsigned char *addr, 2462 __always_unused u16 vid) 2463 { 2464 int err; 2465 2466 if (ndm->ndm_state & NUD_PERMANENT) { 2467 netdev_err(dev, "FDB only supports static addresses\n"); 2468 return -EINVAL; 2469 } 2470 2471 if (is_unicast_ether_addr(addr)) 2472 err = dev_uc_del(dev, addr); 2473 else if (is_multicast_ether_addr(addr)) 2474 err = dev_mc_del(dev, addr); 2475 else 2476 err = -EINVAL; 2477 2478 return err; 2479 } 2480 2481 /** 2482 * ice_set_features - set the netdev feature flags 2483 * @netdev: ptr to the netdev being adjusted 2484 * @features: the feature set that the stack is suggesting 2485 */ 2486 static int ice_set_features(struct net_device *netdev, 2487 netdev_features_t features) 2488 { 2489 struct ice_netdev_priv *np = netdev_priv(netdev); 2490 struct ice_vsi *vsi = np->vsi; 2491 int ret = 0; 2492 2493 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 2494 ret = ice_vsi_manage_rss_lut(vsi, true); 2495 else if (!(features & NETIF_F_RXHASH) && 2496 netdev->features & NETIF_F_RXHASH) 2497 ret = ice_vsi_manage_rss_lut(vsi, false); 2498 2499 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && 2500 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 2501 ret = ice_vsi_manage_vlan_stripping(vsi, true); 2502 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && 2503 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 2504 ret = ice_vsi_manage_vlan_stripping(vsi, false); 2505 else if ((features & NETIF_F_HW_VLAN_CTAG_TX) && 2506 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 2507 ret = ice_vsi_manage_vlan_insertion(vsi); 2508 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && 2509 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 2510 ret = ice_vsi_manage_vlan_insertion(vsi); 2511 2512 return ret; 2513 } 2514 2515 /** 2516 * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI 2517 * @vsi: VSI to setup vlan properties for 2518 */ 2519 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 2520 { 2521 int ret = 0; 2522 2523 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 2524 ret = ice_vsi_manage_vlan_stripping(vsi, true); 2525 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 2526 ret = ice_vsi_manage_vlan_insertion(vsi); 2527 2528 return ret; 2529 } 2530 2531 /** 2532 * ice_vsi_cfg - Setup the VSI 2533 * @vsi: the VSI being configured 2534 * 2535 * Return 0 on success and negative value on error 2536 */ 2537 static int ice_vsi_cfg(struct ice_vsi *vsi) 2538 { 2539 int err; 2540 2541 if (vsi->netdev) { 2542 ice_set_rx_mode(vsi->netdev); 2543 2544 err = ice_vsi_vlan_setup(vsi); 2545 2546 if (err) 2547 return err; 2548 } 2549 err = ice_vsi_cfg_txqs(vsi); 2550 if (!err) 2551 err = ice_vsi_cfg_rxqs(vsi); 2552 2553 return err; 2554 } 2555 2556 /** 2557 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 2558 * @vsi: the VSI being configured 2559 */ 2560 static void ice_napi_enable_all(struct ice_vsi *vsi) 2561 { 2562 int q_idx; 2563 2564 if (!vsi->netdev) 2565 return; 2566 2567 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { 2568 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 2569 2570 if 
(q_vector->rx.ring || q_vector->tx.ring)
2571 napi_enable(&q_vector->napi);
2572 }
2573 }
2574
2575 /**
2576 * ice_up_complete - Finish the last steps of bringing up a connection
2577 * @vsi: The VSI being configured
2578 *
2579 * Return 0 on success and negative value on error
2580 */
2581 static int ice_up_complete(struct ice_vsi *vsi)
2582 {
2583 struct ice_pf *pf = vsi->back;
2584 int err;
2585
2586 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
2587 ice_vsi_cfg_msix(vsi);
2588 else
2589 return -ENOTSUPP;
2590
2591 /* Enable only Rx rings, Tx rings were enabled by the FW when the
2592 * Tx queue group list was configured and the context bits were
2593 * programmed using ice_vsi_cfg_txqs
2594 */
2595 err = ice_vsi_start_rx_rings(vsi);
2596 if (err)
2597 return err;
2598
2599 clear_bit(__ICE_DOWN, vsi->state);
2600 ice_napi_enable_all(vsi);
2601 ice_vsi_ena_irq(vsi);
2602
2603 if (vsi->port_info &&
2604 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
2605 vsi->netdev) {
2606 ice_print_link_msg(vsi, true);
2607 netif_tx_start_all_queues(vsi->netdev);
2608 netif_carrier_on(vsi->netdev);
2609 }
2610
2611 ice_service_task_schedule(pf);
2612
2613 return err;
2614 }
2615
2616 /**
2617 * ice_up - Bring the connection back up after being down
2618 * @vsi: VSI being configured
2619 */
2620 int ice_up(struct ice_vsi *vsi)
2621 {
2622 int err;
2623
2624 err = ice_vsi_cfg(vsi);
2625 if (!err)
2626 err = ice_up_complete(vsi);
2627
2628 return err;
2629 }
2630
2631 /**
2632 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
2633 * @ring: Tx or Rx ring to read stats from
2634 * @pkts: packets stats counter
2635 * @bytes: bytes stats counter
2636 *
2637 * This function fetches stats from the ring considering the atomic operations
2638 * that need to be performed to read u64 values on 32-bit machines.
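 *
 * Note: the do/while loop below is the standard u64_stats seqcount read
 * pattern; if the writer side updates ring->syncp between fetch_begin and
 * fetch_retry, the counters are simply re-read so the 64-bit values stay
 * consistent on 32-bit architectures.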
2639 */ 2640 static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, 2641 u64 *bytes) 2642 { 2643 unsigned int start; 2644 *pkts = 0; 2645 *bytes = 0; 2646 2647 if (!ring) 2648 return; 2649 do { 2650 start = u64_stats_fetch_begin_irq(&ring->syncp); 2651 *pkts = ring->stats.pkts; 2652 *bytes = ring->stats.bytes; 2653 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 2654 } 2655 2656 /** 2657 * ice_update_vsi_ring_stats - Update VSI stats counters 2658 * @vsi: the VSI to be updated 2659 */ 2660 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 2661 { 2662 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 2663 struct ice_ring *ring; 2664 u64 pkts, bytes; 2665 int i; 2666 2667 /* reset netdev stats */ 2668 vsi_stats->tx_packets = 0; 2669 vsi_stats->tx_bytes = 0; 2670 vsi_stats->rx_packets = 0; 2671 vsi_stats->rx_bytes = 0; 2672 2673 /* reset non-netdev (extended) stats */ 2674 vsi->tx_restart = 0; 2675 vsi->tx_busy = 0; 2676 vsi->tx_linearize = 0; 2677 vsi->rx_buf_failed = 0; 2678 vsi->rx_page_failed = 0; 2679 2680 rcu_read_lock(); 2681 2682 /* update Tx rings counters */ 2683 ice_for_each_txq(vsi, i) { 2684 ring = READ_ONCE(vsi->tx_rings[i]); 2685 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 2686 vsi_stats->tx_packets += pkts; 2687 vsi_stats->tx_bytes += bytes; 2688 vsi->tx_restart += ring->tx_stats.restart_q; 2689 vsi->tx_busy += ring->tx_stats.tx_busy; 2690 vsi->tx_linearize += ring->tx_stats.tx_linearize; 2691 } 2692 2693 /* update Rx rings counters */ 2694 ice_for_each_rxq(vsi, i) { 2695 ring = READ_ONCE(vsi->rx_rings[i]); 2696 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 2697 vsi_stats->rx_packets += pkts; 2698 vsi_stats->rx_bytes += bytes; 2699 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 2700 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; 2701 } 2702 2703 rcu_read_unlock(); 2704 } 2705 2706 /** 2707 * ice_update_vsi_stats - Update VSI stats counters 2708 * @vsi: the VSI to be updated 2709 */ 2710 static void ice_update_vsi_stats(struct ice_vsi *vsi) 2711 { 2712 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 2713 struct ice_eth_stats *cur_es = &vsi->eth_stats; 2714 struct ice_pf *pf = vsi->back; 2715 2716 if (test_bit(__ICE_DOWN, vsi->state) || 2717 test_bit(__ICE_CFG_BUSY, pf->state)) 2718 return; 2719 2720 /* get stats as recorded by Tx/Rx rings */ 2721 ice_update_vsi_ring_stats(vsi); 2722 2723 /* get VSI stats as recorded by the hardware */ 2724 ice_update_eth_stats(vsi); 2725 2726 cur_ns->tx_errors = cur_es->tx_errors; 2727 cur_ns->rx_dropped = cur_es->rx_discards; 2728 cur_ns->tx_dropped = cur_es->tx_discards; 2729 cur_ns->multicast = cur_es->rx_multicast; 2730 2731 /* update some more netdev stats if this is main VSI */ 2732 if (vsi->type == ICE_VSI_PF) { 2733 cur_ns->rx_crc_errors = pf->stats.crc_errors; 2734 cur_ns->rx_errors = pf->stats.crc_errors + 2735 pf->stats.illegal_bytes; 2736 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 2737 } 2738 } 2739 2740 /** 2741 * ice_update_pf_stats - Update PF port stats counters 2742 * @pf: PF whose stats needs to be updated 2743 */ 2744 static void ice_update_pf_stats(struct ice_pf *pf) 2745 { 2746 struct ice_hw_port_stats *prev_ps, *cur_ps; 2747 struct ice_hw *hw = &pf->hw; 2748 u8 pf_id; 2749 2750 prev_ps = &pf->stats_prev; 2751 cur_ps = &pf->stats; 2752 pf_id = hw->pf_id; 2753 2754 ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), 2755 pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, 2756 &cur_ps->eth.rx_bytes); 2757 2758 ice_stat_update40(hw, 
GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), 2759 pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, 2760 &cur_ps->eth.rx_unicast); 2761 2762 ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), 2763 pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, 2764 &cur_ps->eth.rx_multicast); 2765 2766 ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), 2767 pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, 2768 &cur_ps->eth.rx_broadcast); 2769 2770 ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), 2771 pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, 2772 &cur_ps->eth.tx_bytes); 2773 2774 ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), 2775 pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, 2776 &cur_ps->eth.tx_unicast); 2777 2778 ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), 2779 pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, 2780 &cur_ps->eth.tx_multicast); 2781 2782 ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), 2783 pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, 2784 &cur_ps->eth.tx_broadcast); 2785 2786 ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, 2787 &prev_ps->tx_dropped_link_down, 2788 &cur_ps->tx_dropped_link_down); 2789 2790 ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), 2791 pf->stat_prev_loaded, &prev_ps->rx_size_64, 2792 &cur_ps->rx_size_64); 2793 2794 ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), 2795 pf->stat_prev_loaded, &prev_ps->rx_size_127, 2796 &cur_ps->rx_size_127); 2797 2798 ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), 2799 pf->stat_prev_loaded, &prev_ps->rx_size_255, 2800 &cur_ps->rx_size_255); 2801 2802 ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), 2803 pf->stat_prev_loaded, &prev_ps->rx_size_511, 2804 &cur_ps->rx_size_511); 2805 2806 ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), 2807 GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, 2808 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 2809 2810 ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), 2811 GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, 2812 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 2813 2814 ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), 2815 GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, 2816 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 2817 2818 ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), 2819 pf->stat_prev_loaded, &prev_ps->tx_size_64, 2820 &cur_ps->tx_size_64); 2821 2822 ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), 2823 pf->stat_prev_loaded, &prev_ps->tx_size_127, 2824 &cur_ps->tx_size_127); 2825 2826 ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), 2827 pf->stat_prev_loaded, &prev_ps->tx_size_255, 2828 &cur_ps->tx_size_255); 2829 2830 ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), 2831 pf->stat_prev_loaded, &prev_ps->tx_size_511, 2832 &cur_ps->tx_size_511); 2833 2834 ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), 2835 GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, 2836 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 2837 2838 ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), 2839 GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, 2840 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 2841 2842 ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), 2843 GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, 2844 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 2845 2846 ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, 2847 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 2848 2849 ice_stat_update32(hw, 
GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, 2850 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 2851 2852 ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, 2853 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 2854 2855 ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, 2856 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 2857 2858 ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, 2859 &prev_ps->crc_errors, &cur_ps->crc_errors); 2860 2861 ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, 2862 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 2863 2864 ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, 2865 &prev_ps->mac_local_faults, 2866 &cur_ps->mac_local_faults); 2867 2868 ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, 2869 &prev_ps->mac_remote_faults, 2870 &cur_ps->mac_remote_faults); 2871 2872 ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, 2873 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 2874 2875 ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, 2876 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 2877 2878 ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, 2879 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 2880 2881 ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, 2882 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 2883 2884 ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, 2885 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 2886 2887 pf->stat_prev_loaded = true; 2888 } 2889 2890 /** 2891 * ice_get_stats64 - get statistics for network device structure 2892 * @netdev: network interface device structure 2893 * @stats: main device statistics structure 2894 */ 2895 static 2896 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 2897 { 2898 struct ice_netdev_priv *np = netdev_priv(netdev); 2899 struct rtnl_link_stats64 *vsi_stats; 2900 struct ice_vsi *vsi = np->vsi; 2901 2902 vsi_stats = &vsi->net_stats; 2903 2904 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) 2905 return; 2906 /* netdev packet/byte stats come from ring counter. These are obtained 2907 * by summing up ring counters (done by ice_update_vsi_ring_stats). 2908 */ 2909 ice_update_vsi_ring_stats(vsi); 2910 stats->tx_packets = vsi_stats->tx_packets; 2911 stats->tx_bytes = vsi_stats->tx_bytes; 2912 stats->rx_packets = vsi_stats->rx_packets; 2913 stats->rx_bytes = vsi_stats->rx_bytes; 2914 2915 /* The rest of the stats can be read from the hardware but instead we 2916 * just return values that the watchdog task has already obtained from 2917 * the hardware. 
2918 */ 2919 stats->multicast = vsi_stats->multicast; 2920 stats->tx_errors = vsi_stats->tx_errors; 2921 stats->tx_dropped = vsi_stats->tx_dropped; 2922 stats->rx_errors = vsi_stats->rx_errors; 2923 stats->rx_dropped = vsi_stats->rx_dropped; 2924 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 2925 stats->rx_length_errors = vsi_stats->rx_length_errors; 2926 } 2927 2928 /** 2929 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 2930 * @vsi: VSI having NAPI disabled 2931 */ 2932 static void ice_napi_disable_all(struct ice_vsi *vsi) 2933 { 2934 int q_idx; 2935 2936 if (!vsi->netdev) 2937 return; 2938 2939 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { 2940 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 2941 2942 if (q_vector->rx.ring || q_vector->tx.ring) 2943 napi_disable(&q_vector->napi); 2944 } 2945 } 2946 2947 /** 2948 * ice_down - Shutdown the connection 2949 * @vsi: The VSI being stopped 2950 */ 2951 int ice_down(struct ice_vsi *vsi) 2952 { 2953 int i, tx_err, rx_err; 2954 2955 /* Caller of this function is expected to set the 2956 * vsi->state __ICE_DOWN bit 2957 */ 2958 if (vsi->netdev) { 2959 netif_carrier_off(vsi->netdev); 2960 netif_tx_disable(vsi->netdev); 2961 } 2962 2963 ice_vsi_dis_irq(vsi); 2964 tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0); 2965 if (tx_err) 2966 netdev_err(vsi->netdev, 2967 "Failed stop Tx rings, VSI %d error %d\n", 2968 vsi->vsi_num, tx_err); 2969 2970 rx_err = ice_vsi_stop_rx_rings(vsi); 2971 if (rx_err) 2972 netdev_err(vsi->netdev, 2973 "Failed stop Rx rings, VSI %d error %d\n", 2974 vsi->vsi_num, rx_err); 2975 2976 ice_napi_disable_all(vsi); 2977 2978 ice_for_each_txq(vsi, i) 2979 ice_clean_tx_ring(vsi->tx_rings[i]); 2980 2981 ice_for_each_rxq(vsi, i) 2982 ice_clean_rx_ring(vsi->rx_rings[i]); 2983 2984 if (tx_err || rx_err) { 2985 netdev_err(vsi->netdev, 2986 "Failed to close VSI 0x%04X on switch 0x%04X\n", 2987 vsi->vsi_num, vsi->vsw->sw_id); 2988 return -EIO; 2989 } 2990 2991 return 0; 2992 } 2993 2994 /** 2995 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 2996 * @vsi: VSI having resources allocated 2997 * 2998 * Return 0 on success, negative on failure 2999 */ 3000 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 3001 { 3002 int i, err = 0; 3003 3004 if (!vsi->num_txq) { 3005 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", 3006 vsi->vsi_num); 3007 return -EINVAL; 3008 } 3009 3010 ice_for_each_txq(vsi, i) { 3011 vsi->tx_rings[i]->netdev = vsi->netdev; 3012 err = ice_setup_tx_ring(vsi->tx_rings[i]); 3013 if (err) 3014 break; 3015 } 3016 3017 return err; 3018 } 3019 3020 /** 3021 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 3022 * @vsi: VSI having resources allocated 3023 * 3024 * Return 0 on success, negative on failure 3025 */ 3026 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 3027 { 3028 int i, err = 0; 3029 3030 if (!vsi->num_rxq) { 3031 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", 3032 vsi->vsi_num); 3033 return -EINVAL; 3034 } 3035 3036 ice_for_each_rxq(vsi, i) { 3037 vsi->rx_rings[i]->netdev = vsi->netdev; 3038 err = ice_setup_rx_ring(vsi->rx_rings[i]); 3039 if (err) 3040 break; 3041 } 3042 3043 return err; 3044 } 3045 3046 /** 3047 * ice_vsi_req_irq - Request IRQ from the OS 3048 * @vsi: The VSI IRQ is being requested for 3049 * @basename: name for the vector 3050 * 3051 * Return 0 on success and a negative value on error 3052 */ 3053 static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) 3054 { 3055 struct ice_pf *pf = vsi->back; 
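/* Only MSI-X vector requests are handled here; when ICE_FLAG_MSIX_ENA is
 * not set there is nothing to request and the default -EINVAL below is
 * returned unchanged.
 */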
3056 int err = -EINVAL; 3057 3058 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3059 err = ice_vsi_req_irq_msix(vsi, basename); 3060 3061 return err; 3062 } 3063 3064 /** 3065 * ice_vsi_open - Called when a network interface is made active 3066 * @vsi: the VSI to open 3067 * 3068 * Initialization of the VSI 3069 * 3070 * Returns 0 on success, negative value on error 3071 */ 3072 static int ice_vsi_open(struct ice_vsi *vsi) 3073 { 3074 char int_name[ICE_INT_NAME_STR_LEN]; 3075 struct ice_pf *pf = vsi->back; 3076 int err; 3077 3078 /* allocate descriptors */ 3079 err = ice_vsi_setup_tx_rings(vsi); 3080 if (err) 3081 goto err_setup_tx; 3082 3083 err = ice_vsi_setup_rx_rings(vsi); 3084 if (err) 3085 goto err_setup_rx; 3086 3087 err = ice_vsi_cfg(vsi); 3088 if (err) 3089 goto err_setup_rx; 3090 3091 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 3092 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); 3093 err = ice_vsi_req_irq(vsi, int_name); 3094 if (err) 3095 goto err_setup_rx; 3096 3097 /* Notify the stack of the actual queue counts. */ 3098 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 3099 if (err) 3100 goto err_set_qs; 3101 3102 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 3103 if (err) 3104 goto err_set_qs; 3105 3106 err = ice_up_complete(vsi); 3107 if (err) 3108 goto err_up_complete; 3109 3110 return 0; 3111 3112 err_up_complete: 3113 ice_down(vsi); 3114 err_set_qs: 3115 ice_vsi_free_irq(vsi); 3116 err_setup_rx: 3117 ice_vsi_free_rx_rings(vsi); 3118 err_setup_tx: 3119 ice_vsi_free_tx_rings(vsi); 3120 3121 return err; 3122 } 3123 3124 /** 3125 * ice_vsi_release_all - Delete all VSIs 3126 * @pf: PF from which all VSIs are being removed 3127 */ 3128 static void ice_vsi_release_all(struct ice_pf *pf) 3129 { 3130 int err, i; 3131 3132 if (!pf->vsi) 3133 return; 3134 3135 for (i = 0; i < pf->num_alloc_vsi; i++) { 3136 if (!pf->vsi[i]) 3137 continue; 3138 3139 err = ice_vsi_release(pf->vsi[i]); 3140 if (err) 3141 dev_dbg(&pf->pdev->dev, 3142 "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 3143 i, err, pf->vsi[i]->vsi_num); 3144 } 3145 } 3146 3147 /** 3148 * ice_dis_vsi - pause a VSI 3149 * @vsi: the VSI being paused 3150 * @locked: is the rtnl_lock already held 3151 */ 3152 static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 3153 { 3154 if (test_bit(__ICE_DOWN, vsi->state)) 3155 return; 3156 3157 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3158 3159 if (vsi->type == ICE_VSI_PF && vsi->netdev) { 3160 if (netif_running(vsi->netdev)) { 3161 if (!locked) { 3162 rtnl_lock(); 3163 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3164 rtnl_unlock(); 3165 } else { 3166 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3167 } 3168 } else { 3169 ice_vsi_close(vsi); 3170 } 3171 } 3172 } 3173 3174 /** 3175 * ice_ena_vsi - resume a VSI 3176 * @vsi: the VSI being resume 3177 */ 3178 static int ice_ena_vsi(struct ice_vsi *vsi) 3179 { 3180 int err = 0; 3181 3182 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && 3183 vsi->netdev) { 3184 if (netif_running(vsi->netdev)) { 3185 rtnl_lock(); 3186 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3187 rtnl_unlock(); 3188 } else { 3189 err = ice_vsi_open(vsi); 3190 } 3191 } 3192 3193 return err; 3194 } 3195 3196 /** 3197 * ice_pf_dis_all_vsi - Pause all VSIs on a PF 3198 * @pf: the PF 3199 */ 3200 static void ice_pf_dis_all_vsi(struct ice_pf *pf) 3201 { 3202 int v; 3203 3204 ice_for_each_vsi(pf, v) 3205 if (pf->vsi[v]) 3206 ice_dis_vsi(pf->vsi[v], false); 3207 } 3208 3209 /** 3210 * ice_pf_ena_all_vsi - 
Resume all VSIs on a PF 3211 * @pf: the PF 3212 */ 3213 static int ice_pf_ena_all_vsi(struct ice_pf *pf) 3214 { 3215 int v; 3216 3217 ice_for_each_vsi(pf, v) 3218 if (pf->vsi[v]) 3219 if (ice_ena_vsi(pf->vsi[v])) 3220 return -EIO; 3221 3222 return 0; 3223 } 3224 3225 /** 3226 * ice_vsi_rebuild_all - rebuild all VSIs in pf 3227 * @pf: the PF 3228 */ 3229 static int ice_vsi_rebuild_all(struct ice_pf *pf) 3230 { 3231 int i; 3232 3233 /* loop through pf->vsi array and reinit the VSI if found */ 3234 for (i = 0; i < pf->num_alloc_vsi; i++) { 3235 int err; 3236 3237 if (!pf->vsi[i]) 3238 continue; 3239 3240 /* VF VSI rebuild isn't supported yet */ 3241 if (pf->vsi[i]->type == ICE_VSI_VF) 3242 continue; 3243 3244 err = ice_vsi_rebuild(pf->vsi[i]); 3245 if (err) { 3246 dev_err(&pf->pdev->dev, 3247 "VSI at index %d rebuild failed\n", 3248 pf->vsi[i]->idx); 3249 return err; 3250 } 3251 3252 dev_info(&pf->pdev->dev, 3253 "VSI at index %d rebuilt. vsi_num = 0x%x\n", 3254 pf->vsi[i]->idx, pf->vsi[i]->vsi_num); 3255 } 3256 3257 return 0; 3258 } 3259 3260 /** 3261 * ice_vsi_replay_all - replay all VSIs configuration in the PF 3262 * @pf: the PF 3263 */ 3264 static int ice_vsi_replay_all(struct ice_pf *pf) 3265 { 3266 struct ice_hw *hw = &pf->hw; 3267 enum ice_status ret; 3268 int i; 3269 3270 /* loop through pf->vsi array and replay the VSI if found */ 3271 for (i = 0; i < pf->num_alloc_vsi; i++) { 3272 if (!pf->vsi[i]) 3273 continue; 3274 3275 ret = ice_replay_vsi(hw, pf->vsi[i]->idx); 3276 if (ret) { 3277 dev_err(&pf->pdev->dev, 3278 "VSI at index %d replay failed %d\n", 3279 pf->vsi[i]->idx, ret); 3280 return -EIO; 3281 } 3282 3283 /* Re-map HW VSI number, using VSI handle that has been 3284 * previously validated in ice_replay_vsi() call above 3285 */ 3286 pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); 3287 3288 dev_info(&pf->pdev->dev, 3289 "VSI at index %d filter replayed successfully - vsi_num %i\n", 3290 pf->vsi[i]->idx, pf->vsi[i]->vsi_num); 3291 } 3292 3293 /* Clean up replay filter after successful re-configuration */ 3294 ice_replay_post(hw); 3295 return 0; 3296 } 3297 3298 /** 3299 * ice_rebuild - rebuild after reset 3300 * @pf: pf to rebuild 3301 */ 3302 static void ice_rebuild(struct ice_pf *pf) 3303 { 3304 struct device *dev = &pf->pdev->dev; 3305 struct ice_hw *hw = &pf->hw; 3306 enum ice_status ret; 3307 int err, i; 3308 3309 if (test_bit(__ICE_DOWN, pf->state)) 3310 goto clear_recovery; 3311 3312 dev_dbg(dev, "rebuilding pf\n"); 3313 3314 ret = ice_init_all_ctrlq(hw); 3315 if (ret) { 3316 dev_err(dev, "control queues init failed %d\n", ret); 3317 goto err_init_ctrlq; 3318 } 3319 3320 ret = ice_clear_pf_cfg(hw); 3321 if (ret) { 3322 dev_err(dev, "clear PF configuration failed %d\n", ret); 3323 goto err_init_ctrlq; 3324 } 3325 3326 ice_clear_pxe_mode(hw); 3327 3328 ret = ice_get_caps(hw); 3329 if (ret) { 3330 dev_err(dev, "ice_get_caps failed %d\n", ret); 3331 goto err_init_ctrlq; 3332 } 3333 3334 err = ice_sched_init_port(hw->port_info); 3335 if (err) 3336 goto err_sched_init_port; 3337 3338 /* reset search_hint of irq_trackers to 0 since interrupts are 3339 * reclaimed and could be allocated from beginning during VSI rebuild 3340 */ 3341 pf->sw_irq_tracker->search_hint = 0; 3342 pf->hw_irq_tracker->search_hint = 0; 3343 3344 err = ice_vsi_rebuild_all(pf); 3345 if (err) { 3346 dev_err(dev, "ice_vsi_rebuild_all failed\n"); 3347 goto err_vsi_rebuild; 3348 } 3349 3350 err = ice_update_link_info(hw->port_info); 3351 if (err) 3352 dev_err(&pf->pdev->dev, "Get link status 
error %d\n", err);
3353
3354 /* Replay all VSIs Configuration, including filters after reset */
3355 if (ice_vsi_replay_all(pf)) {
3356 dev_err(&pf->pdev->dev,
3357 "error replaying VSI configurations with switch filter rules\n");
3358 goto err_vsi_rebuild;
3359 }
3360
3361 /* start misc vector */
3362 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
3363 err = ice_req_irq_msix_misc(pf);
3364 if (err) {
3365 dev_err(dev, "misc vector setup failed: %d\n", err);
3366 goto err_vsi_rebuild;
3367 }
3368 }
3369
3370 /* restart the VSIs that were rebuilt and running before the reset */
3371 err = ice_pf_ena_all_vsi(pf);
3372 if (err) {
3373 dev_err(&pf->pdev->dev, "error enabling VSIs\n");
3374 /* no need to disable VSIs in tear down path in ice_rebuild()
3375 * since it's already taken care of in ice_vsi_open()
3376 */
3377 goto err_vsi_rebuild;
3378 }
3379
3380 ice_reset_all_vfs(pf, true);
3381
3382 for (i = 0; i < pf->num_alloc_vsi; i++) {
3383 bool link_up;
3384
3385 if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
3386 continue;
3387 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
3388 if (link_up) {
3389 netif_carrier_on(pf->vsi[i]->netdev);
3390 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
3391 } else {
3392 netif_carrier_off(pf->vsi[i]->netdev);
3393 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
3394 }
3395 }
3396
3397 /* if we get here, reset flow is successful */
3398 clear_bit(__ICE_RESET_FAILED, pf->state);
3399 return;
3400
3401 err_vsi_rebuild:
3402 ice_vsi_release_all(pf);
3403 err_sched_init_port:
3404 ice_sched_cleanup_all(hw);
3405 err_init_ctrlq:
3406 ice_shutdown_all_ctrlq(hw);
3407 set_bit(__ICE_RESET_FAILED, pf->state);
3408 clear_recovery:
3409 /* set this bit in PF state to control service task scheduling */
3410 set_bit(__ICE_NEEDS_RESTART, pf->state);
3411 dev_err(dev, "Rebuild failed, unload and reload driver\n");
3412 }
3413
3414 /**
3415 * ice_change_mtu - NDO callback to change the MTU
3416 * @netdev: network interface device structure
3417 * @new_mtu: new value for maximum frame size
3418 *
3419 * Returns 0 on success, negative on failure
3420 */
3421 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
3422 {
3423 struct ice_netdev_priv *np = netdev_priv(netdev);
3424 struct ice_vsi *vsi = np->vsi;
3425 struct ice_pf *pf = vsi->back;
3426 u8 count = 0;
3427
3428 if (new_mtu == netdev->mtu) {
3429 netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
3430 return 0;
3431 }
3432
3433 if (new_mtu < netdev->min_mtu) {
3434 netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
3435 netdev->min_mtu);
3436 return -EINVAL;
3437 } else if (new_mtu > netdev->max_mtu) {
3438 netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
3439 netdev->max_mtu);
3440 return -EINVAL;
3441 }
3442 /* if a reset is in progress, wait for some time for it to complete */
3443 do {
3444 if (ice_is_reset_in_progress(pf->state)) {
3445 count++;
3446 usleep_range(1000, 2000);
3447 } else {
3448 break;
3449 }
3450
3451 } while (count < 100);
3452
3453 if (count == 100) {
3454 netdev_err(netdev, "can't change mtu.
Device is busy\n"); 3455 return -EBUSY; 3456 } 3457 3458 netdev->mtu = new_mtu; 3459 3460 /* if VSI is up, bring it down and then back up */ 3461 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { 3462 int err; 3463 3464 err = ice_down(vsi); 3465 if (err) { 3466 netdev_err(netdev, "change mtu if_up err %d\n", err); 3467 return err; 3468 } 3469 3470 err = ice_up(vsi); 3471 if (err) { 3472 netdev_err(netdev, "change mtu if_up err %d\n", err); 3473 return err; 3474 } 3475 } 3476 3477 netdev_dbg(netdev, "changed mtu to %d\n", new_mtu); 3478 return 0; 3479 } 3480 3481 /** 3482 * ice_set_rss - Set RSS keys and lut 3483 * @vsi: Pointer to VSI structure 3484 * @seed: RSS hash seed 3485 * @lut: Lookup table 3486 * @lut_size: Lookup table size 3487 * 3488 * Returns 0 on success, negative on failure 3489 */ 3490 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 3491 { 3492 struct ice_pf *pf = vsi->back; 3493 struct ice_hw *hw = &pf->hw; 3494 enum ice_status status; 3495 3496 if (seed) { 3497 struct ice_aqc_get_set_rss_keys *buf = 3498 (struct ice_aqc_get_set_rss_keys *)seed; 3499 3500 status = ice_aq_set_rss_key(hw, vsi->idx, buf); 3501 3502 if (status) { 3503 dev_err(&pf->pdev->dev, 3504 "Cannot set RSS key, err %d aq_err %d\n", 3505 status, hw->adminq.rq_last_status); 3506 return -EIO; 3507 } 3508 } 3509 3510 if (lut) { 3511 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 3512 lut, lut_size); 3513 if (status) { 3514 dev_err(&pf->pdev->dev, 3515 "Cannot set RSS lut, err %d aq_err %d\n", 3516 status, hw->adminq.rq_last_status); 3517 return -EIO; 3518 } 3519 } 3520 3521 return 0; 3522 } 3523 3524 /** 3525 * ice_get_rss - Get RSS keys and lut 3526 * @vsi: Pointer to VSI structure 3527 * @seed: Buffer to store the keys 3528 * @lut: Buffer to store the lookup table entries 3529 * @lut_size: Size of buffer to store the lookup table entries 3530 * 3531 * Returns 0 on success, negative on failure 3532 */ 3533 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 3534 { 3535 struct ice_pf *pf = vsi->back; 3536 struct ice_hw *hw = &pf->hw; 3537 enum ice_status status; 3538 3539 if (seed) { 3540 struct ice_aqc_get_set_rss_keys *buf = 3541 (struct ice_aqc_get_set_rss_keys *)seed; 3542 3543 status = ice_aq_get_rss_key(hw, vsi->idx, buf); 3544 if (status) { 3545 dev_err(&pf->pdev->dev, 3546 "Cannot get RSS key, err %d aq_err %d\n", 3547 status, hw->adminq.rq_last_status); 3548 return -EIO; 3549 } 3550 } 3551 3552 if (lut) { 3553 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 3554 lut, lut_size); 3555 if (status) { 3556 dev_err(&pf->pdev->dev, 3557 "Cannot get RSS lut, err %d aq_err %d\n", 3558 status, hw->adminq.rq_last_status); 3559 return -EIO; 3560 } 3561 } 3562 3563 return 0; 3564 } 3565 3566 /** 3567 * ice_bridge_getlink - Get the hardware bridge mode 3568 * @skb: skb buff 3569 * @pid: process id 3570 * @seq: RTNL message seq 3571 * @dev: the netdev being configured 3572 * @filter_mask: filter mask passed in 3573 * @nlflags: netlink flags passed in 3574 * 3575 * Return the bridge mode (VEB/VEPA) 3576 */ 3577 static int 3578 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 3579 struct net_device *dev, u32 filter_mask, int nlflags) 3580 { 3581 struct ice_netdev_priv *np = netdev_priv(dev); 3582 struct ice_vsi *vsi = np->vsi; 3583 struct ice_pf *pf = vsi->back; 3584 u16 bmode; 3585 3586 bmode = pf->first_sw->bridge_mode; 3587 3588 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 3589 filter_mask, NULL); 3590 } 3591 
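/* Background for the VEB/VEPA handling below: in VEB mode the device's
 * internal switch is allowed to loop traffic back locally between the VSIs
 * attached to it, whereas in VEPA mode all frames are forwarded to the
 * external (adjacent) switch, which is then responsible for any local
 * switching.
 */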
3592 /**
3593 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
3594 * @vsi: Pointer to VSI structure
3595 * @bmode: Hardware bridge mode (VEB/VEPA)
3596 *
3597 * Returns 0 on success, negative on failure
3598 */
3599 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
3600 {
3601 struct device *dev = &vsi->back->pdev->dev;
3602 struct ice_aqc_vsi_props *vsi_props;
3603 struct ice_hw *hw = &vsi->back->hw;
3604 struct ice_vsi_ctx ctxt = { 0 };
3605 enum ice_status status;
3606
3607 vsi_props = &vsi->info;
3608 ctxt.info = vsi->info;
3609
3610 if (bmode == BRIDGE_MODE_VEB)
3611 /* change from VEPA to VEB mode */
3612 ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
3613 else
3614 /* change from VEB to VEPA mode */
3615 ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
3616 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
3617
3618 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3619 if (status) {
3620 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
3621 bmode, status, hw->adminq.sq_last_status);
3622 return -EIO;
3623 }
3624 /* Update sw flags for bookkeeping */
3625 vsi_props->sw_flags = ctxt.info.sw_flags;
3626
3627 return 0;
3628 }
3629
3630 /**
3631 * ice_bridge_setlink - Set the hardware bridge mode
3632 * @dev: the netdev being configured
3633 * @nlh: RTNL message
3634 * @flags: bridge setlink flags
3635 * @extack: netlink extended ack
3636 *
3637 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
3638 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
3639 * not already set) for all VSIs connected to this switch, and also updates the
3640 * unicast switch filter rules for the corresponding switch of the netdev.
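 *
 * For reference only: user space typically reaches this path through
 * iproute2, e.g. "bridge link set dev <ifname> hwmode vepa" (an illustrative
 * command; the exact tooling is outside the scope of this driver).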
3641 */ 3642 static int 3643 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 3644 u16 __always_unused flags, struct netlink_ext_ack *extack) 3645 { 3646 struct ice_netdev_priv *np = netdev_priv(dev); 3647 struct ice_pf *pf = np->vsi->back; 3648 struct nlattr *attr, *br_spec; 3649 struct ice_hw *hw = &pf->hw; 3650 enum ice_status status; 3651 struct ice_sw *pf_sw; 3652 int rem, v, err = 0; 3653 3654 pf_sw = pf->first_sw; 3655 /* find the attribute in the netlink message */ 3656 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 3657 3658 nla_for_each_nested(attr, br_spec, rem) { 3659 __u16 mode; 3660 3661 if (nla_type(attr) != IFLA_BRIDGE_MODE) 3662 continue; 3663 mode = nla_get_u16(attr); 3664 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 3665 return -EINVAL; 3666 /* Continue if bridge mode is not being flipped */ 3667 if (mode == pf_sw->bridge_mode) 3668 continue; 3669 /* Iterates through the PF VSI list and update the loopback 3670 * mode of the VSI 3671 */ 3672 ice_for_each_vsi(pf, v) { 3673 if (!pf->vsi[v]) 3674 continue; 3675 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 3676 if (err) 3677 return err; 3678 } 3679 3680 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 3681 /* Update the unicast switch filter rules for the corresponding 3682 * switch of the netdev 3683 */ 3684 status = ice_update_sw_rule_bridge_mode(hw); 3685 if (status) { 3686 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n", 3687 mode, status, hw->adminq.sq_last_status); 3688 /* revert hw->evb_veb */ 3689 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 3690 return -EIO; 3691 } 3692 3693 pf_sw->bridge_mode = mode; 3694 } 3695 3696 return 0; 3697 } 3698 3699 /** 3700 * ice_tx_timeout - Respond to a Tx Hang 3701 * @netdev: network interface device structure 3702 */ 3703 static void ice_tx_timeout(struct net_device *netdev) 3704 { 3705 struct ice_netdev_priv *np = netdev_priv(netdev); 3706 struct ice_ring *tx_ring = NULL; 3707 struct ice_vsi *vsi = np->vsi; 3708 struct ice_pf *pf = vsi->back; 3709 int hung_queue = -1; 3710 u32 i; 3711 3712 pf->tx_timeout_count++; 3713 3714 /* find the stopped queue the same way dev_watchdog() does */ 3715 for (i = 0; i < netdev->num_tx_queues; i++) { 3716 unsigned long trans_start; 3717 struct netdev_queue *q; 3718 3719 q = netdev_get_tx_queue(netdev, i); 3720 trans_start = q->trans_start; 3721 if (netif_xmit_stopped(q) && 3722 time_after(jiffies, 3723 trans_start + netdev->watchdog_timeo)) { 3724 hung_queue = i; 3725 break; 3726 } 3727 } 3728 3729 if (i == netdev->num_tx_queues) 3730 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); 3731 else 3732 /* now that we have an index, find the tx_ring struct */ 3733 for (i = 0; i < vsi->num_txq; i++) 3734 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 3735 if (hung_queue == vsi->tx_rings[i]->q_index) { 3736 tx_ring = vsi->tx_rings[i]; 3737 break; 3738 } 3739 3740 /* Reset recovery level if enough time has elapsed after last timeout. 3741 * Also ensure no new reset action happens before next timeout period. 
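 * Successive timeouts inside the recovery window escalate the request below:
 * first a PF reset, then a core reset, then a global reset, after which the
 * device is declared unrecoverable.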
3742 */ 3743 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 3744 pf->tx_timeout_recovery_level = 1; 3745 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 3746 netdev->watchdog_timeo))) 3747 return; 3748 3749 if (tx_ring) { 3750 struct ice_hw *hw = &pf->hw; 3751 u32 head, val = 0; 3752 3753 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & 3754 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; 3755 /* Read interrupt register */ 3756 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3757 val = rd32(hw, 3758 GLINT_DYN_CTL(tx_ring->q_vector->v_idx + 3759 tx_ring->vsi->hw_base_vector)); 3760 3761 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 3762 vsi->vsi_num, hung_queue, tx_ring->next_to_clean, 3763 head, tx_ring->next_to_use, val); 3764 } 3765 3766 pf->tx_timeout_last_recovery = jiffies; 3767 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", 3768 pf->tx_timeout_recovery_level, hung_queue); 3769 3770 switch (pf->tx_timeout_recovery_level) { 3771 case 1: 3772 set_bit(__ICE_PFR_REQ, pf->state); 3773 break; 3774 case 2: 3775 set_bit(__ICE_CORER_REQ, pf->state); 3776 break; 3777 case 3: 3778 set_bit(__ICE_GLOBR_REQ, pf->state); 3779 break; 3780 default: 3781 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 3782 set_bit(__ICE_DOWN, pf->state); 3783 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3784 set_bit(__ICE_SERVICE_DIS, pf->state); 3785 break; 3786 } 3787 3788 ice_service_task_schedule(pf); 3789 pf->tx_timeout_recovery_level++; 3790 } 3791 3792 /** 3793 * ice_open - Called when a network interface becomes active 3794 * @netdev: network interface device structure 3795 * 3796 * The open entry point is called when a network interface is made 3797 * active by the system (IFF_UP). At this point all resources needed 3798 * for transmit and receive operations are allocated, the interrupt 3799 * handler is registered with the OS, the netdev watchdog is enabled, 3800 * and the stack is notified that the interface is ready. 3801 * 3802 * Returns 0 on success, negative value on failure 3803 */ 3804 static int ice_open(struct net_device *netdev) 3805 { 3806 struct ice_netdev_priv *np = netdev_priv(netdev); 3807 struct ice_vsi *vsi = np->vsi; 3808 int err; 3809 3810 if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { 3811 netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 3812 return -EIO; 3813 } 3814 3815 netif_carrier_off(netdev); 3816 3817 err = ice_vsi_open(vsi); 3818 3819 if (err) 3820 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 3821 vsi->vsi_num, vsi->vsw->sw_id); 3822 return err; 3823 } 3824 3825 /** 3826 * ice_stop - Disables a network interface 3827 * @netdev: network interface device structure 3828 * 3829 * The stop entry point is called when an interface is de-activated by the OS, 3830 * and the netdevice enters the DOWN state. The hardware is still under the 3831 * driver's control, but the netdev interface is disabled. 
3832 * 3833 * Returns success only - not allowed to fail 3834 */ 3835 static int ice_stop(struct net_device *netdev) 3836 { 3837 struct ice_netdev_priv *np = netdev_priv(netdev); 3838 struct ice_vsi *vsi = np->vsi; 3839 3840 ice_vsi_close(vsi); 3841 3842 return 0; 3843 } 3844 3845 /** 3846 * ice_features_check - Validate encapsulated packet conforms to limits 3847 * @skb: skb buffer 3848 * @netdev: This port's netdev 3849 * @features: Offload features that the stack believes apply 3850 */ 3851 static netdev_features_t 3852 ice_features_check(struct sk_buff *skb, 3853 struct net_device __always_unused *netdev, 3854 netdev_features_t features) 3855 { 3856 size_t len; 3857 3858 /* No point in doing any of this if neither checksum nor GSO are 3859 * being requested for this frame. We can rule out both by just 3860 * checking for CHECKSUM_PARTIAL 3861 */ 3862 if (skb->ip_summed != CHECKSUM_PARTIAL) 3863 return features; 3864 3865 /* We cannot support GSO if the MSS is going to be less than 3866 * 64 bytes. If it is then we need to drop support for GSO. 3867 */ 3868 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3869 features &= ~NETIF_F_GSO_MASK; 3870 3871 len = skb_network_header(skb) - skb->data; 3872 if (len & ~(ICE_TXD_MACLEN_MAX)) 3873 goto out_rm_features; 3874 3875 len = skb_transport_header(skb) - skb_network_header(skb); 3876 if (len & ~(ICE_TXD_IPLEN_MAX)) 3877 goto out_rm_features; 3878 3879 if (skb->encapsulation) { 3880 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3881 if (len & ~(ICE_TXD_L4LEN_MAX)) 3882 goto out_rm_features; 3883 3884 len = skb_inner_transport_header(skb) - 3885 skb_inner_network_header(skb); 3886 if (len & ~(ICE_TXD_IPLEN_MAX)) 3887 goto out_rm_features; 3888 } 3889 3890 return features; 3891 out_rm_features: 3892 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3893 } 3894 3895 static const struct net_device_ops ice_netdev_ops = { 3896 .ndo_open = ice_open, 3897 .ndo_stop = ice_stop, 3898 .ndo_start_xmit = ice_start_xmit, 3899 .ndo_features_check = ice_features_check, 3900 .ndo_set_rx_mode = ice_set_rx_mode, 3901 .ndo_set_mac_address = ice_set_mac_address, 3902 .ndo_validate_addr = eth_validate_addr, 3903 .ndo_change_mtu = ice_change_mtu, 3904 .ndo_get_stats64 = ice_get_stats64, 3905 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 3906 .ndo_set_vf_mac = ice_set_vf_mac, 3907 .ndo_get_vf_config = ice_get_vf_cfg, 3908 .ndo_set_vf_trust = ice_set_vf_trust, 3909 .ndo_set_vf_vlan = ice_set_vf_port_vlan, 3910 .ndo_set_vf_link_state = ice_set_vf_link_state, 3911 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 3912 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 3913 .ndo_set_features = ice_set_features, 3914 .ndo_bridge_getlink = ice_bridge_getlink, 3915 .ndo_bridge_setlink = ice_bridge_setlink, 3916 .ndo_fdb_add = ice_fdb_add, 3917 .ndo_fdb_del = ice_fdb_del, 3918 .ndo_tx_timeout = ice_tx_timeout, 3919 }; 3920
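/* ice_netdev_ops is the set of ndo callbacks attached to the PF netdev when
 * the interface is configured; it routes stack requests (open/stop, xmit,
 * MTU and MAC changes, VF configuration, bridge mode, FDB updates) to the
 * handlers defined above.
 */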