// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"
#include "ice_lib.h"

#define DRV_VERSION	"0.7.2-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);

static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u32 ice_get_tx_pending(struct ice_ring *ring)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	unsigned int i;
	u32 v, v_idx;
	int packets;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			int itr = ICE_ITR_NONE;

			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				v_idx = tx_ring->q_vector->v_idx;
				wr32(&vsi->back->hw,
				     GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
				     (itr << GLINT_DYN_CTL_ITR_INDX_S) |
				     GLINT_DYN_CTL_SWINT_TRIG_M |
				     GLINT_DYN_CTL_INTENA_MSK_M);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
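
/* MAC filter sync flow: the two callbacks below are handed to
 * __dev_uc_sync()/__dev_mc_sync() and only collect addresses into
 * vsi->tmp_sync_list and vsi->tmp_unsync_list; ice_vsi_sync_fltr() later
 * pushes those lists to hardware via ice_add_mac()/ice_remove_mac().
 */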

/**
 * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: mac address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * mac filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: mac address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the mac filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated.
release lock */ 211 netif_addr_unlock_bh(netdev); 212 } 213 214 /* Remove mac addresses in the unsync list */ 215 status = ice_remove_mac(hw, &vsi->tmp_unsync_list); 216 ice_free_fltr_list(dev, &vsi->tmp_unsync_list); 217 if (status) { 218 netdev_err(netdev, "Failed to delete MAC filters\n"); 219 /* if we failed because of alloc failures, just bail */ 220 if (status == ICE_ERR_NO_MEMORY) { 221 err = -ENOMEM; 222 goto out; 223 } 224 } 225 226 /* Add mac addresses in the sync list */ 227 status = ice_add_mac(hw, &vsi->tmp_sync_list); 228 ice_free_fltr_list(dev, &vsi->tmp_sync_list); 229 if (status) { 230 netdev_err(netdev, "Failed to add MAC filters\n"); 231 /* If there is no more space for new umac filters, vsi 232 * should go into promiscuous mode. There should be some 233 * space reserved for promiscuous filters. 234 */ 235 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && 236 !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC, 237 vsi->state)) { 238 promisc_forced_on = true; 239 netdev_warn(netdev, 240 "Reached MAC filter limit, forcing promisc mode on VSI %d\n", 241 vsi->vsi_num); 242 } else { 243 err = -EIO; 244 goto out; 245 } 246 } 247 /* check for changes in promiscuous modes */ 248 if (changed_flags & IFF_ALLMULTI) 249 netdev_warn(netdev, "Unsupported configuration\n"); 250 251 if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || 252 test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) { 253 clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); 254 if (vsi->current_netdev_flags & IFF_PROMISC) { 255 /* Apply TX filter rule to get traffic from VMs */ 256 status = ice_cfg_dflt_vsi(hw, vsi->idx, true, 257 ICE_FLTR_TX); 258 if (status) { 259 netdev_err(netdev, "Error setting default VSI %i tx rule\n", 260 vsi->vsi_num); 261 vsi->current_netdev_flags &= ~IFF_PROMISC; 262 err = -EIO; 263 goto out_promisc; 264 } 265 /* Apply RX filter rule to get traffic from wire */ 266 status = ice_cfg_dflt_vsi(hw, vsi->idx, true, 267 ICE_FLTR_RX); 268 if (status) { 269 netdev_err(netdev, "Error setting default VSI %i rx rule\n", 270 vsi->vsi_num); 271 vsi->current_netdev_flags &= ~IFF_PROMISC; 272 err = -EIO; 273 goto out_promisc; 274 } 275 } else { 276 /* Clear TX filter rule to stop traffic from VMs */ 277 status = ice_cfg_dflt_vsi(hw, vsi->idx, false, 278 ICE_FLTR_TX); 279 if (status) { 280 netdev_err(netdev, "Error clearing default VSI %i tx rule\n", 281 vsi->vsi_num); 282 vsi->current_netdev_flags |= IFF_PROMISC; 283 err = -EIO; 284 goto out_promisc; 285 } 286 /* Clear RX filter to remove traffic from wire */ 287 status = ice_cfg_dflt_vsi(hw, vsi->idx, false, 288 ICE_FLTR_RX); 289 if (status) { 290 netdev_err(netdev, "Error clearing default VSI %i rx rule\n", 291 vsi->vsi_num); 292 vsi->current_netdev_flags |= IFF_PROMISC; 293 err = -EIO; 294 goto out_promisc; 295 } 296 } 297 } 298 goto exit; 299 300 out_promisc: 301 set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); 302 goto exit; 303 out: 304 /* if something went wrong then set the changed flag so we try again */ 305 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 306 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 307 exit: 308 clear_bit(__ICE_CFG_BUSY, vsi->state); 309 return err; 310 } 311 312 /** 313 * ice_sync_fltr_subtask - Sync the VSI filter list with HW 314 * @pf: board private structure 315 */ 316 static void ice_sync_fltr_subtask(struct ice_pf *pf) 317 { 318 int v; 319 320 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) 321 return; 322 323 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 324 325 for (v = 0; v < 
pf->num_alloc_vsi; v++) 326 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && 327 ice_vsi_sync_fltr(pf->vsi[v])) { 328 /* come back and try again later */ 329 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 330 break; 331 } 332 } 333 334 /** 335 * ice_prepare_for_reset - prep for the core to reset 336 * @pf: board private structure 337 * 338 * Inform or close all dependent features in prep for reset. 339 */ 340 static void 341 ice_prepare_for_reset(struct ice_pf *pf) 342 { 343 struct ice_hw *hw = &pf->hw; 344 345 /* Notify VFs of impending reset */ 346 if (ice_check_sq_alive(hw, &hw->mailboxq)) 347 ice_vc_notify_reset(pf); 348 349 /* disable the VSIs and their queues that are not already DOWN */ 350 ice_pf_dis_all_vsi(pf); 351 352 if (hw->port_info) 353 ice_sched_clear_port(hw->port_info); 354 355 ice_shutdown_all_ctrlq(hw); 356 357 set_bit(__ICE_PREPARED_FOR_RESET, pf->state); 358 } 359 360 /** 361 * ice_do_reset - Initiate one of many types of resets 362 * @pf: board private structure 363 * @reset_type: reset type requested 364 * before this function was called. 365 */ 366 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 367 { 368 struct device *dev = &pf->pdev->dev; 369 struct ice_hw *hw = &pf->hw; 370 371 dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); 372 WARN_ON(in_interrupt()); 373 374 ice_prepare_for_reset(pf); 375 376 /* trigger the reset */ 377 if (ice_reset(hw, reset_type)) { 378 dev_err(dev, "reset %d failed\n", reset_type); 379 set_bit(__ICE_RESET_FAILED, pf->state); 380 clear_bit(__ICE_RESET_OICR_RECV, pf->state); 381 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); 382 clear_bit(__ICE_PFR_REQ, pf->state); 383 clear_bit(__ICE_CORER_REQ, pf->state); 384 clear_bit(__ICE_GLOBR_REQ, pf->state); 385 return; 386 } 387 388 /* PFR is a bit of a special case because it doesn't result in an OICR 389 * interrupt. So for PFR, rebuild after the reset and clear the reset- 390 * associated state bits. 391 */ 392 if (reset_type == ICE_RESET_PFR) { 393 pf->pfr_count++; 394 ice_rebuild(pf); 395 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); 396 clear_bit(__ICE_PFR_REQ, pf->state); 397 } 398 } 399 400 /** 401 * ice_reset_subtask - Set up for resetting the device and driver 402 * @pf: board private structure 403 */ 404 static void ice_reset_subtask(struct ice_pf *pf) 405 { 406 enum ice_reset_req reset_type = ICE_RESET_INVAL; 407 408 /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an 409 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type 410 * of reset is pending and sets bits in pf->state indicating the reset 411 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set 412 * prepare for pending reset if not already (for PF software-initiated 413 * global resets the software should already be prepared for it as 414 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated 415 * by firmware or software on other PFs, that bit is not set so prepare 416 * for the reset now), poll for reset done, rebuild and return. 417 */ 418 if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { 419 clear_bit(__ICE_GLOBR_RECV, pf->state); 420 clear_bit(__ICE_CORER_RECV, pf->state); 421 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) 422 ice_prepare_for_reset(pf); 423 424 /* make sure we are ready to rebuild */ 425 if (ice_check_reset(&pf->hw)) { 426 set_bit(__ICE_RESET_FAILED, pf->state); 427 } else { 428 /* done with reset. 
start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case the rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	const char *speed;
	const char *fc;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "RX/TX";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "TX";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "Unknown";
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
		    speed, fc);
}

/**
 * ice_vsi_link_event - update the vsi's netdev
 * @vsi: the vsi on which the link event occurred
 * @link_up: whether or not the vsi needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (!vsi->netdev) {
			dev_dbg(&vsi->back->pdev->dev,
				"vsi->netdev is not initialized!\n");
			return;
		}
		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_link_event - process the link event
 * @pf: pf that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 *
 * Returns -EIO if ice_get_link_status() fails
 * Returns 0 on success
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
{
	u8
new_link_speed, old_link_speed; 569 struct ice_phy_info *phy_info; 570 bool new_link_same_as_old; 571 bool new_link, old_link; 572 u8 lport; 573 u16 v; 574 575 phy_info = &pi->phy; 576 phy_info->link_info_old = phy_info->link_info; 577 /* Force ice_get_link_status() to update link info */ 578 phy_info->get_link_info = true; 579 580 old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); 581 old_link_speed = phy_info->link_info_old.link_speed; 582 583 lport = pi->lport; 584 if (ice_get_link_status(pi, &new_link)) { 585 dev_dbg(&pf->pdev->dev, 586 "Could not get link status for port %d\n", lport); 587 return -EIO; 588 } 589 590 new_link_speed = phy_info->link_info.link_speed; 591 592 new_link_same_as_old = (new_link == old_link && 593 new_link_speed == old_link_speed); 594 595 ice_for_each_vsi(pf, v) { 596 struct ice_vsi *vsi = pf->vsi[v]; 597 598 if (!vsi || !vsi->port_info) 599 continue; 600 601 if (new_link_same_as_old && 602 (test_bit(__ICE_DOWN, vsi->state) || 603 new_link == netif_carrier_ok(vsi->netdev))) 604 continue; 605 606 if (vsi->port_info->lport == lport) { 607 ice_print_link_msg(vsi, new_link); 608 ice_vsi_link_event(vsi, new_link); 609 } 610 } 611 612 ice_vc_notify_link_state(pf); 613 614 return 0; 615 } 616 617 /** 618 * ice_watchdog_subtask - periodic tasks not using event driven scheduling 619 * @pf: board private structure 620 */ 621 static void ice_watchdog_subtask(struct ice_pf *pf) 622 { 623 int i; 624 625 /* if interface is down do nothing */ 626 if (test_bit(__ICE_DOWN, pf->state) || 627 test_bit(__ICE_CFG_BUSY, pf->state)) 628 return; 629 630 /* make sure we don't do these things too often */ 631 if (time_before(jiffies, 632 pf->serv_tmr_prev + pf->serv_tmr_period)) 633 return; 634 635 pf->serv_tmr_prev = jiffies; 636 637 if (ice_link_event(pf, pf->hw.port_info)) 638 dev_dbg(&pf->pdev->dev, "ice_link_event failed\n"); 639 640 /* Update the stats for active netdevs so the network stack 641 * can look at updated numbers whenever it cares to 642 */ 643 ice_update_pf_stats(pf); 644 for (i = 0; i < pf->num_alloc_vsi; i++) 645 if (pf->vsi[i] && pf->vsi[i]->netdev) 646 ice_update_vsi_stats(pf->vsi[i]); 647 } 648 649 /** 650 * __ice_clean_ctrlq - helper function to clean controlq rings 651 * @pf: ptr to struct ice_pf 652 * @q_type: specific Control queue type 653 */ 654 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) 655 { 656 struct ice_rq_event_info event; 657 struct ice_hw *hw = &pf->hw; 658 struct ice_ctl_q_info *cq; 659 u16 pending, i = 0; 660 const char *qtype; 661 u32 oldval, val; 662 663 /* Do not clean control queue if/when PF reset fails */ 664 if (test_bit(__ICE_RESET_FAILED, pf->state)) 665 return 0; 666 667 switch (q_type) { 668 case ICE_CTL_Q_ADMIN: 669 cq = &hw->adminq; 670 qtype = "Admin"; 671 break; 672 case ICE_CTL_Q_MAILBOX: 673 cq = &hw->mailboxq; 674 qtype = "Mailbox"; 675 break; 676 default: 677 dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", 678 q_type); 679 return 0; 680 } 681 682 /* check for error indications - PF_xx_AxQLEN register layout for 683 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN. 
684 */ 685 val = rd32(hw, cq->rq.len); 686 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 687 PF_FW_ARQLEN_ARQCRIT_M)) { 688 oldval = val; 689 if (val & PF_FW_ARQLEN_ARQVFE_M) 690 dev_dbg(&pf->pdev->dev, 691 "%s Receive Queue VF Error detected\n", qtype); 692 if (val & PF_FW_ARQLEN_ARQOVFL_M) { 693 dev_dbg(&pf->pdev->dev, 694 "%s Receive Queue Overflow Error detected\n", 695 qtype); 696 } 697 if (val & PF_FW_ARQLEN_ARQCRIT_M) 698 dev_dbg(&pf->pdev->dev, 699 "%s Receive Queue Critical Error detected\n", 700 qtype); 701 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 702 PF_FW_ARQLEN_ARQCRIT_M); 703 if (oldval != val) 704 wr32(hw, cq->rq.len, val); 705 } 706 707 val = rd32(hw, cq->sq.len); 708 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 709 PF_FW_ATQLEN_ATQCRIT_M)) { 710 oldval = val; 711 if (val & PF_FW_ATQLEN_ATQVFE_M) 712 dev_dbg(&pf->pdev->dev, 713 "%s Send Queue VF Error detected\n", qtype); 714 if (val & PF_FW_ATQLEN_ATQOVFL_M) { 715 dev_dbg(&pf->pdev->dev, 716 "%s Send Queue Overflow Error detected\n", 717 qtype); 718 } 719 if (val & PF_FW_ATQLEN_ATQCRIT_M) 720 dev_dbg(&pf->pdev->dev, 721 "%s Send Queue Critical Error detected\n", 722 qtype); 723 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 724 PF_FW_ATQLEN_ATQCRIT_M); 725 if (oldval != val) 726 wr32(hw, cq->sq.len, val); 727 } 728 729 event.buf_len = cq->rq_buf_size; 730 event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len, 731 GFP_KERNEL); 732 if (!event.msg_buf) 733 return 0; 734 735 do { 736 enum ice_status ret; 737 u16 opcode; 738 739 ret = ice_clean_rq_elem(hw, cq, &event, &pending); 740 if (ret == ICE_ERR_AQ_NO_WORK) 741 break; 742 if (ret) { 743 dev_err(&pf->pdev->dev, 744 "%s Receive Queue event error %d\n", qtype, 745 ret); 746 break; 747 } 748 749 opcode = le16_to_cpu(event.desc.opcode); 750 751 switch (opcode) { 752 case ice_mbx_opc_send_msg_to_pf: 753 ice_vc_process_vf_msg(pf, &event); 754 break; 755 case ice_aqc_opc_fw_logging: 756 ice_output_fw_log(hw, &event.desc, event.msg_buf); 757 break; 758 default: 759 dev_dbg(&pf->pdev->dev, 760 "%s Receive Queue unknown event 0x%04x ignored\n", 761 qtype, opcode); 762 break; 763 } 764 } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); 765 766 devm_kfree(&pf->pdev->dev, event.msg_buf); 767 768 return pending && (i == ICE_DFLT_IRQ_WORK); 769 } 770 771 /** 772 * ice_ctrlq_pending - check if there is a difference between ntc and ntu 773 * @hw: pointer to hardware info 774 * @cq: control queue information 775 * 776 * returns true if there are pending messages in a queue, false if there aren't 777 */ 778 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) 779 { 780 u16 ntu; 781 782 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); 783 return cq->rq.next_to_clean != ntu; 784 } 785 786 /** 787 * ice_clean_adminq_subtask - clean the AdminQ rings 788 * @pf: board private structure 789 */ 790 static void ice_clean_adminq_subtask(struct ice_pf *pf) 791 { 792 struct ice_hw *hw = &pf->hw; 793 794 if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) 795 return; 796 797 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) 798 return; 799 800 clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); 801 802 /* There might be a situation where new messages arrive to a control 803 * queue between processing the last message and clearing the 804 * EVENT_PENDING bit. So before exiting, check queue head again (using 805 * ice_ctrlq_pending) and process new messages if any. 
806 */ 807 if (ice_ctrlq_pending(hw, &hw->adminq)) 808 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); 809 810 ice_flush(hw); 811 } 812 813 /** 814 * ice_clean_mailboxq_subtask - clean the MailboxQ rings 815 * @pf: board private structure 816 */ 817 static void ice_clean_mailboxq_subtask(struct ice_pf *pf) 818 { 819 struct ice_hw *hw = &pf->hw; 820 821 if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state)) 822 return; 823 824 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) 825 return; 826 827 clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); 828 829 if (ice_ctrlq_pending(hw, &hw->mailboxq)) 830 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); 831 832 ice_flush(hw); 833 } 834 835 /** 836 * ice_service_task_schedule - schedule the service task to wake up 837 * @pf: board private structure 838 * 839 * If not already scheduled, this puts the task into the work queue. 840 */ 841 static void ice_service_task_schedule(struct ice_pf *pf) 842 { 843 if (!test_bit(__ICE_SERVICE_DIS, pf->state) && 844 !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && 845 !test_bit(__ICE_NEEDS_RESTART, pf->state)) 846 queue_work(ice_wq, &pf->serv_task); 847 } 848 849 /** 850 * ice_service_task_complete - finish up the service task 851 * @pf: board private structure 852 */ 853 static void ice_service_task_complete(struct ice_pf *pf) 854 { 855 WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state)); 856 857 /* force memory (pf->state) to sync before next service task */ 858 smp_mb__before_atomic(); 859 clear_bit(__ICE_SERVICE_SCHED, pf->state); 860 } 861 862 /** 863 * ice_service_task_stop - stop service task and cancel works 864 * @pf: board private structure 865 */ 866 static void ice_service_task_stop(struct ice_pf *pf) 867 { 868 set_bit(__ICE_SERVICE_DIS, pf->state); 869 870 if (pf->serv_tmr.function) 871 del_timer_sync(&pf->serv_tmr); 872 if (pf->serv_task.func) 873 cancel_work_sync(&pf->serv_task); 874 875 clear_bit(__ICE_SERVICE_SCHED, pf->state); 876 } 877 878 /** 879 * ice_service_timer - timer callback to schedule service task 880 * @t: pointer to timer_list 881 */ 882 static void ice_service_timer(struct timer_list *t) 883 { 884 struct ice_pf *pf = from_timer(pf, t, serv_tmr); 885 886 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); 887 ice_service_task_schedule(pf); 888 } 889 890 /** 891 * ice_handle_mdd_event - handle malicious driver detect event 892 * @pf: pointer to the PF structure 893 * 894 * Called from service task. 
OICR interrupt handler indicates MDD event.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool mdd_detected = false;
	u32 reg;
	int i;

	if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		bool pf_mdd_detected = false;

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, so initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__ICE_NEEDS_RESTART, pf->state);
			ice_service_task_schedule(pf);
		}
	}

	/* see if one of the VFs needs to be reset */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw,
VP_MDET_TX_TCLAN(i)); 1004 if (reg & VP_MDET_TX_TCLAN_VALID_M) { 1005 wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); 1006 vf->num_mdd_events++; 1007 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 1008 i); 1009 } 1010 1011 reg = rd32(hw, VP_MDET_TX_TDPU(i)); 1012 if (reg & VP_MDET_TX_TDPU_VALID_M) { 1013 wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); 1014 vf->num_mdd_events++; 1015 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 1016 i); 1017 } 1018 1019 reg = rd32(hw, VP_MDET_RX(i)); 1020 if (reg & VP_MDET_RX_VALID_M) { 1021 wr32(hw, VP_MDET_RX(i), 0xFFFF); 1022 vf->num_mdd_events++; 1023 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 1024 i); 1025 } 1026 1027 if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) { 1028 dev_info(&pf->pdev->dev, 1029 "Too many MDD events on VF %d, disabled\n", i); 1030 dev_info(&pf->pdev->dev, 1031 "Use PF Control I/F to re-enable the VF\n"); 1032 set_bit(ICE_VF_STATE_DIS, vf->vf_states); 1033 } 1034 } 1035 1036 /* re-enable MDD interrupt cause */ 1037 clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); 1038 reg = rd32(hw, PFINT_OICR_ENA); 1039 reg |= PFINT_OICR_MAL_DETECT_M; 1040 wr32(hw, PFINT_OICR_ENA, reg); 1041 ice_flush(hw); 1042 } 1043 1044 /** 1045 * ice_service_task - manage and run subtasks 1046 * @work: pointer to work_struct contained by the PF struct 1047 */ 1048 static void ice_service_task(struct work_struct *work) 1049 { 1050 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 1051 unsigned long start_time = jiffies; 1052 1053 /* subtasks */ 1054 1055 /* process reset requests first */ 1056 ice_reset_subtask(pf); 1057 1058 /* bail if a reset/recovery cycle is pending or rebuild failed */ 1059 if (ice_is_reset_in_progress(pf->state) || 1060 test_bit(__ICE_SUSPENDED, pf->state) || 1061 test_bit(__ICE_NEEDS_RESTART, pf->state)) { 1062 ice_service_task_complete(pf); 1063 return; 1064 } 1065 1066 ice_check_for_hang_subtask(pf); 1067 ice_sync_fltr_subtask(pf); 1068 ice_handle_mdd_event(pf); 1069 ice_process_vflr_event(pf); 1070 ice_watchdog_subtask(pf); 1071 ice_clean_adminq_subtask(pf); 1072 ice_clean_mailboxq_subtask(pf); 1073 1074 /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ 1075 ice_service_task_complete(pf); 1076 1077 /* If the tasks have taken longer than one service timer period 1078 * or there is more work to be done, reset the service timer to 1079 * schedule the service task now. 
1080 */ 1081 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 1082 test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || 1083 test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || 1084 test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 1085 test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) 1086 mod_timer(&pf->serv_tmr, jiffies); 1087 } 1088 1089 /** 1090 * ice_set_ctrlq_len - helper function to set controlq length 1091 * @hw: pointer to the hw instance 1092 */ 1093 static void ice_set_ctrlq_len(struct ice_hw *hw) 1094 { 1095 hw->adminq.num_rq_entries = ICE_AQ_LEN; 1096 hw->adminq.num_sq_entries = ICE_AQ_LEN; 1097 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 1098 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 1099 hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; 1100 hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; 1101 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 1102 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 1103 } 1104 1105 /** 1106 * ice_irq_affinity_notify - Callback for affinity changes 1107 * @notify: context as to what irq was changed 1108 * @mask: the new affinity mask 1109 * 1110 * This is a callback function used by the irq_set_affinity_notifier function 1111 * so that we may register to receive changes to the irq affinity masks. 1112 */ 1113 static void ice_irq_affinity_notify(struct irq_affinity_notify *notify, 1114 const cpumask_t *mask) 1115 { 1116 struct ice_q_vector *q_vector = 1117 container_of(notify, struct ice_q_vector, affinity_notify); 1118 1119 cpumask_copy(&q_vector->affinity_mask, mask); 1120 } 1121 1122 /** 1123 * ice_irq_affinity_release - Callback for affinity notifier release 1124 * @ref: internal core kernel usage 1125 * 1126 * This is a callback function used by the irq_set_affinity_notifier function 1127 * to inform the current notification subscriber that they will no longer 1128 * receive notifications. 
1129 */ 1130 static void ice_irq_affinity_release(struct kref __always_unused *ref) {} 1131 1132 /** 1133 * ice_vsi_ena_irq - Enable IRQ for the given VSI 1134 * @vsi: the VSI being configured 1135 */ 1136 static int ice_vsi_ena_irq(struct ice_vsi *vsi) 1137 { 1138 struct ice_pf *pf = vsi->back; 1139 struct ice_hw *hw = &pf->hw; 1140 1141 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 1142 int i; 1143 1144 for (i = 0; i < vsi->num_q_vectors; i++) 1145 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 1146 } 1147 1148 ice_flush(hw); 1149 return 0; 1150 } 1151 1152 /** 1153 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 1154 * @vsi: the VSI being configured 1155 * @basename: name for the vector 1156 */ 1157 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 1158 { 1159 int q_vectors = vsi->num_q_vectors; 1160 struct ice_pf *pf = vsi->back; 1161 int base = vsi->sw_base_vector; 1162 int rx_int_idx = 0; 1163 int tx_int_idx = 0; 1164 int vector, err; 1165 int irq_num; 1166 1167 for (vector = 0; vector < q_vectors; vector++) { 1168 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 1169 1170 irq_num = pf->msix_entries[base + vector].vector; 1171 1172 if (q_vector->tx.ring && q_vector->rx.ring) { 1173 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1174 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 1175 tx_int_idx++; 1176 } else if (q_vector->rx.ring) { 1177 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1178 "%s-%s-%d", basename, "rx", rx_int_idx++); 1179 } else if (q_vector->tx.ring) { 1180 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1181 "%s-%s-%d", basename, "tx", tx_int_idx++); 1182 } else { 1183 /* skip this unused q_vector */ 1184 continue; 1185 } 1186 err = devm_request_irq(&pf->pdev->dev, 1187 pf->msix_entries[base + vector].vector, 1188 vsi->irq_handler, 0, q_vector->name, 1189 q_vector); 1190 if (err) { 1191 netdev_err(vsi->netdev, 1192 "MSIX request_irq failed, error: %d\n", err); 1193 goto free_q_irqs; 1194 } 1195 1196 /* register for affinity change notifications */ 1197 q_vector->affinity_notify.notify = ice_irq_affinity_notify; 1198 q_vector->affinity_notify.release = ice_irq_affinity_release; 1199 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 1200 1201 /* assign the mask for this irq */ 1202 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 1203 } 1204 1205 vsi->irqs_ready = true; 1206 return 0; 1207 1208 free_q_irqs: 1209 while (vector) { 1210 vector--; 1211 irq_num = pf->msix_entries[base + vector].vector, 1212 irq_set_affinity_notifier(irq_num, NULL); 1213 irq_set_affinity_hint(irq_num, NULL); 1214 devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]); 1215 } 1216 return err; 1217 } 1218 1219 /** 1220 * ice_ena_misc_vector - enable the non-queue interrupts 1221 * @pf: board private structure 1222 */ 1223 static void ice_ena_misc_vector(struct ice_pf *pf) 1224 { 1225 struct ice_hw *hw = &pf->hw; 1226 u32 val; 1227 1228 /* clear things first */ 1229 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ 1230 rd32(hw, PFINT_OICR); /* read to clear */ 1231 1232 val = (PFINT_OICR_ECC_ERR_M | 1233 PFINT_OICR_MAL_DETECT_M | 1234 PFINT_OICR_GRST_M | 1235 PFINT_OICR_PCI_EXCEPTION_M | 1236 PFINT_OICR_VFLR_M | 1237 PFINT_OICR_HMC_ERR_M | 1238 PFINT_OICR_PE_CRITERR_M); 1239 1240 wr32(hw, PFINT_OICR_ENA, val); 1241 1242 /* SW_ITR_IDX = 0, but don't change INTENA */ 1243 wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx), 1244 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); 1245 } 1246 1247 /** 1248 * 
ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		ena_mask &= ~PFINT_OICR_VFLR_M;
		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
				reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report and mask off any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
1337 */ 1338 if (oicr & (PFINT_OICR_PE_CRITERR_M | 1339 PFINT_OICR_PCI_EXCEPTION_M | 1340 PFINT_OICR_ECC_ERR_M)) { 1341 set_bit(__ICE_PFR_REQ, pf->state); 1342 ice_service_task_schedule(pf); 1343 } 1344 ena_mask &= ~oicr; 1345 } 1346 ret = IRQ_HANDLED; 1347 1348 /* re-enable interrupt causes that are not handled during this pass */ 1349 wr32(hw, PFINT_OICR_ENA, ena_mask); 1350 if (!test_bit(__ICE_DOWN, pf->state)) { 1351 ice_service_task_schedule(pf); 1352 ice_irq_dynamic_ena(hw, NULL, NULL); 1353 } 1354 1355 return ret; 1356 } 1357 1358 /** 1359 * ice_free_irq_msix_misc - Unroll misc vector setup 1360 * @pf: board private structure 1361 */ 1362 static void ice_free_irq_msix_misc(struct ice_pf *pf) 1363 { 1364 /* disable OICR interrupt */ 1365 wr32(&pf->hw, PFINT_OICR_ENA, 0); 1366 ice_flush(&pf->hw); 1367 1368 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { 1369 synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector); 1370 devm_free_irq(&pf->pdev->dev, 1371 pf->msix_entries[pf->sw_oicr_idx].vector, pf); 1372 } 1373 1374 pf->num_avail_sw_msix += 1; 1375 ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID); 1376 pf->num_avail_hw_msix += 1; 1377 ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID); 1378 } 1379 1380 /** 1381 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 1382 * @pf: board private structure 1383 * 1384 * This sets up the handler for MSIX 0, which is used to manage the 1385 * non-queue interrupts, e.g. AdminQ and errors. This is not used 1386 * when in MSI or Legacy interrupt mode. 1387 */ 1388 static int ice_req_irq_msix_misc(struct ice_pf *pf) 1389 { 1390 struct ice_hw *hw = &pf->hw; 1391 int oicr_idx, err = 0; 1392 u32 val; 1393 1394 if (!pf->int_name[0]) 1395 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 1396 dev_driver_string(&pf->pdev->dev), 1397 dev_name(&pf->pdev->dev)); 1398 1399 /* Do not request IRQ but do enable OICR interrupt since settings are 1400 * lost during reset. Note that this function is called only during 1401 * rebuild path and not while reset is in progress. 
1402 */ 1403 if (ice_is_reset_in_progress(pf->state)) 1404 goto skip_req_irq; 1405 1406 /* reserve one vector in sw_irq_tracker for misc interrupts */ 1407 oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1408 if (oicr_idx < 0) 1409 return oicr_idx; 1410 1411 pf->num_avail_sw_msix -= 1; 1412 pf->sw_oicr_idx = oicr_idx; 1413 1414 /* reserve one vector in hw_irq_tracker for misc interrupts */ 1415 oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1416 if (oicr_idx < 0) { 1417 ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1418 pf->num_avail_sw_msix += 1; 1419 return oicr_idx; 1420 } 1421 pf->num_avail_hw_msix -= 1; 1422 pf->hw_oicr_idx = oicr_idx; 1423 1424 err = devm_request_irq(&pf->pdev->dev, 1425 pf->msix_entries[pf->sw_oicr_idx].vector, 1426 ice_misc_intr, 0, pf->int_name, pf); 1427 if (err) { 1428 dev_err(&pf->pdev->dev, 1429 "devm_request_irq for %s failed: %d\n", 1430 pf->int_name, err); 1431 ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1432 pf->num_avail_sw_msix += 1; 1433 ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); 1434 pf->num_avail_hw_msix += 1; 1435 return err; 1436 } 1437 1438 skip_req_irq: 1439 ice_ena_misc_vector(pf); 1440 1441 val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 1442 PFINT_OICR_CTL_CAUSE_ENA_M); 1443 wr32(hw, PFINT_OICR_CTL, val); 1444 1445 /* This enables Admin queue Interrupt causes */ 1446 val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | 1447 PFINT_FW_CTL_CAUSE_ENA_M); 1448 wr32(hw, PFINT_FW_CTL, val); 1449 1450 /* This enables Mailbox queue Interrupt causes */ 1451 val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 1452 PFINT_MBX_CTL_CAUSE_ENA_M); 1453 wr32(hw, PFINT_MBX_CTL, val); 1454 1455 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), 1456 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 1457 1458 ice_flush(hw); 1459 ice_irq_dynamic_ena(hw, NULL, NULL); 1460 1461 return 0; 1462 } 1463 1464 /** 1465 * ice_napi_del - Remove NAPI handler for the VSI 1466 * @vsi: VSI for which NAPI handler is to be removed 1467 */ 1468 void ice_napi_del(struct ice_vsi *vsi) 1469 { 1470 int v_idx; 1471 1472 if (!vsi->netdev) 1473 return; 1474 1475 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 1476 netif_napi_del(&vsi->q_vectors[v_idx]->napi); 1477 } 1478 1479 /** 1480 * ice_napi_add - register NAPI handler for the VSI 1481 * @vsi: VSI for which NAPI handler is to be registered 1482 * 1483 * This function is only called in the driver's load path. Registering the NAPI 1484 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 1485 * reset/rebuild, etc.) 
1486 */ 1487 static void ice_napi_add(struct ice_vsi *vsi) 1488 { 1489 int v_idx; 1490 1491 if (!vsi->netdev) 1492 return; 1493 1494 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 1495 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, 1496 ice_napi_poll, NAPI_POLL_WEIGHT); 1497 } 1498 1499 /** 1500 * ice_cfg_netdev - Allocate, configure and register a netdev 1501 * @vsi: the VSI associated with the new netdev 1502 * 1503 * Returns 0 on success, negative value on failure 1504 */ 1505 static int ice_cfg_netdev(struct ice_vsi *vsi) 1506 { 1507 netdev_features_t csumo_features; 1508 netdev_features_t vlano_features; 1509 netdev_features_t dflt_features; 1510 netdev_features_t tso_features; 1511 struct ice_netdev_priv *np; 1512 struct net_device *netdev; 1513 u8 mac_addr[ETH_ALEN]; 1514 int err; 1515 1516 netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), 1517 vsi->alloc_txq, vsi->alloc_rxq); 1518 if (!netdev) 1519 return -ENOMEM; 1520 1521 vsi->netdev = netdev; 1522 np = netdev_priv(netdev); 1523 np->vsi = vsi; 1524 1525 dflt_features = NETIF_F_SG | 1526 NETIF_F_HIGHDMA | 1527 NETIF_F_RXHASH; 1528 1529 csumo_features = NETIF_F_RXCSUM | 1530 NETIF_F_IP_CSUM | 1531 NETIF_F_SCTP_CRC | 1532 NETIF_F_IPV6_CSUM; 1533 1534 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 1535 NETIF_F_HW_VLAN_CTAG_TX | 1536 NETIF_F_HW_VLAN_CTAG_RX; 1537 1538 tso_features = NETIF_F_TSO; 1539 1540 /* set features that user can change */ 1541 netdev->hw_features = dflt_features | csumo_features | 1542 vlano_features | tso_features; 1543 1544 /* enable features */ 1545 netdev->features |= netdev->hw_features; 1546 /* encap and VLAN devices inherit default, csumo and tso features */ 1547 netdev->hw_enc_features |= dflt_features | csumo_features | 1548 tso_features; 1549 netdev->vlan_features |= dflt_features | csumo_features | 1550 tso_features; 1551 1552 if (vsi->type == ICE_VSI_PF) { 1553 SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); 1554 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 1555 1556 ether_addr_copy(netdev->dev_addr, mac_addr); 1557 ether_addr_copy(netdev->perm_addr, mac_addr); 1558 } 1559 1560 netdev->priv_flags |= IFF_UNICAST_FLT; 1561 1562 /* assign netdev_ops */ 1563 netdev->netdev_ops = &ice_netdev_ops; 1564 1565 /* setup watchdog timeout value to be 5 second */ 1566 netdev->watchdog_timeo = 5 * HZ; 1567 1568 ice_set_ethtool_ops(netdev); 1569 1570 netdev->min_mtu = ETH_MIN_MTU; 1571 netdev->max_mtu = ICE_MAX_MTU; 1572 1573 err = register_netdev(vsi->netdev); 1574 if (err) 1575 return err; 1576 1577 netif_carrier_off(vsi->netdev); 1578 1579 /* make sure transmit queues start off as stopped */ 1580 netif_tx_stop_all_queues(vsi->netdev); 1581 1582 return 0; 1583 } 1584 1585 /** 1586 * ice_fill_rss_lut - Fill the RSS lookup table with default values 1587 * @lut: Lookup table 1588 * @rss_table_size: Lookup table size 1589 * @rss_size: Range of queue number for hashing 1590 */ 1591 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 1592 { 1593 u16 i; 1594 1595 for (i = 0; i < rss_table_size; i++) 1596 lut[i] = i % rss_size; 1597 } 1598 1599 /** 1600 * ice_pf_vsi_setup - Set up a PF VSI 1601 * @pf: board private structure 1602 * @pi: pointer to the port_info instance 1603 * 1604 * Returns pointer to the successfully allocated VSI sw struct on success, 1605 * otherwise returns NULL on failure. 
1606 */ 1607 static struct ice_vsi * 1608 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 1609 { 1610 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); 1611 } 1612 1613 /** 1614 * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload 1615 * @netdev: network interface to be adjusted 1616 * @proto: unused protocol 1617 * @vid: vlan id to be added 1618 * 1619 * net_device_ops implementation for adding vlan ids 1620 */ 1621 static int ice_vlan_rx_add_vid(struct net_device *netdev, 1622 __always_unused __be16 proto, u16 vid) 1623 { 1624 struct ice_netdev_priv *np = netdev_priv(netdev); 1625 struct ice_vsi *vsi = np->vsi; 1626 1627 if (vid >= VLAN_N_VID) { 1628 netdev_err(netdev, "VLAN id requested %d is out of range %d\n", 1629 vid, VLAN_N_VID); 1630 return -EINVAL; 1631 } 1632 1633 if (vsi->info.pvid) 1634 return -EINVAL; 1635 1636 /* Enable VLAN pruning when VLAN 0 is added */ 1637 if (unlikely(!vid)) { 1638 int ret = ice_cfg_vlan_pruning(vsi, true); 1639 1640 if (ret) 1641 return ret; 1642 } 1643 1644 /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is 1645 * needed to continue allowing all untagged packets since VLAN prune 1646 * list is applied to all packets by the switch 1647 */ 1648 return ice_vsi_add_vlan(vsi, vid); 1649 } 1650 1651 /** 1652 * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 1653 * @netdev: network interface to be adjusted 1654 * @proto: unused protocol 1655 * @vid: vlan id to be removed 1656 * 1657 * net_device_ops implementation for removing vlan ids 1658 */ 1659 static int ice_vlan_rx_kill_vid(struct net_device *netdev, 1660 __always_unused __be16 proto, u16 vid) 1661 { 1662 struct ice_netdev_priv *np = netdev_priv(netdev); 1663 struct ice_vsi *vsi = np->vsi; 1664 int status; 1665 1666 if (vsi->info.pvid) 1667 return -EINVAL; 1668 1669 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN 1670 * information 1671 */ 1672 status = ice_vsi_kill_vlan(vsi, vid); 1673 if (status) 1674 return status; 1675 1676 /* Disable VLAN pruning when VLAN 0 is removed */ 1677 if (unlikely(!vid)) 1678 status = ice_cfg_vlan_pruning(vsi, false); 1679 1680 return status; 1681 } 1682 1683 /** 1684 * ice_setup_pf_sw - Setup the HW switch on startup or after reset 1685 * @pf: board private structure 1686 * 1687 * Returns 0 on success, negative value on failure 1688 */ 1689 static int ice_setup_pf_sw(struct ice_pf *pf) 1690 { 1691 LIST_HEAD(tmp_add_list); 1692 u8 broadcast[ETH_ALEN]; 1693 struct ice_vsi *vsi; 1694 int status = 0; 1695 1696 if (ice_is_reset_in_progress(pf->state)) 1697 return -EBUSY; 1698 1699 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 1700 if (!vsi) { 1701 status = -ENOMEM; 1702 goto unroll_vsi_setup; 1703 } 1704 1705 status = ice_cfg_netdev(vsi); 1706 if (status) { 1707 status = -ENODEV; 1708 goto unroll_vsi_setup; 1709 } 1710 1711 /* registering the NAPI handler requires both the queues and 1712 * netdev to be created, which are done in ice_pf_vsi_setup() 1713 * and ice_cfg_netdev() respectively 1714 */ 1715 ice_napi_add(vsi); 1716 1717 /* To add a MAC filter, first add the MAC to a list and then 1718 * pass the list to ice_add_mac. 1719 */ 1720 1721 /* Add a unicast MAC filter so the VSI can get its packets */ 1722 status = ice_add_mac_to_list(vsi, &tmp_add_list, 1723 vsi->port_info->mac.perm_addr); 1724 if (status) 1725 goto unroll_napi_add; 1726 1727 /* VSI needs to receive broadcast traffic, so add the broadcast 1728 * MAC address to the list as well. 
	 */
	eth_broadcast_addr(broadcast);
	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto free_mac_list;

	/* program MAC filters for entries in tmp_add_list */
	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status) {
		dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
		status = -ENOMEM;
		goto free_mac_list;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;

free_mac_list:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);

unroll_napi_add:
	if (vsi) {
		ice_napi_del(vsi);
		if (vsi->netdev) {
			if (vsi->netdev->reg_state == NETREG_REGISTERED)
				unregister_netdev(vsi->netdev);
			free_netdev(vsi->netdev);
			vsi->netdev = NULL;
		}
	}

unroll_vsi_setup:
	if (vsi) {
		ice_vsi_free_q_vectors(vsi);
		ice_vsi_delete(vsi);
		ice_vsi_put_qs(vsi);
		pf->q_left_tx += vsi->alloc_txq;
		pf->q_left_rx += vsi->alloc_rxq;
		ice_vsi_clear(vsi);
	}
	return status;
}

/**
 * ice_determine_q_usage - Calculate queue distribution
 * @pf: board private structure
 *
 * Determine how many Tx/Rx queues the PF LAN VSI gets and how many are left
 * over for later use.
 */
static void ice_determine_q_usage(struct ice_pf *pf)
{
	u16 q_left_tx, q_left_rx;

	q_left_tx = pf->hw.func_caps.common_cap.num_txq;
	q_left_rx = pf->hw.func_caps.common_cap.num_rxq;

	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());

	/* only 1 Rx queue unless RSS is enabled */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		pf->num_lan_rx = 1;
	else
		pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());

	pf->q_left_tx = q_left_tx - pf->num_lan_tx;
	pf->q_left_rx = q_left_rx - pf->num_lan_rx;
}

/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	ice_service_task_stop(pf);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->avail_q_mutex);
}

/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static void ice_init_pf(struct ice_pf *pf)
{
	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
	set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
		struct ice_hw *hw = &pf->hw;

		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
					      ICE_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->avail_q_mutex);

	/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
	mutex_lock(&pf->avail_q_mutex);
	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
	mutex_unlock(&pf->avail_q_mutex);

	if (pf->hw.func_caps.common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
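
/* MSI-X budgeting below, illustrated: one vector is always reserved for the
 * misc/OICR handler and up to num_online_cpus() vectors for LAN traffic, so
 * on an 8-CPU system with enough device vectors v_budget = 1 + 8 = 9. If the
 * OS grants fewer vectors, the LAN allocation is trimmed, and if even two
 * vectors cannot be obtained, MSI-X is disabled altogether.
 */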

/**
 * ice_ena_msix_range - Request a range of MSI-X vectors from the OS
 * @pf: board private structure
 *
 * Compute the number of MSI-X vectors required (v_budget) and request that
 * many from the OS. Return the number of vectors reserved or negative on
 * failure.
 */
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int v_left, v_actual, v_budget = 0;
	int needed, err, i;

	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;

	/* reserve one vector for miscellaneous handler */
	needed = 1;
	v_budget += needed;
	v_left -= needed;

	/* reserve vectors for LAN traffic */
	pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
	v_budget += pf->num_lan_msix;
	v_left -= pf->num_lan_msix;

	pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);

	if (!pf->msix_entries) {
		err = -ENOMEM;
		goto exit_err;
	}

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;

	/* actually reserve the vectors */
	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					 ICE_MIN_MSIX, v_budget);

	if (v_actual < 0) {
		dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto msix_err;
	}

	if (v_actual < v_budget) {
		dev_warn(&pf->pdev->dev,
			 "not enough vectors. requested = %d, obtained = %d\n",
			 v_budget, v_actual);
		if (v_actual >= (pf->num_lan_msix + 1)) {
			pf->num_avail_sw_msix = v_actual -
						(pf->num_lan_msix + 1);
		} else if (v_actual >= 2) {
			pf->num_lan_msix = 1;
			pf->num_avail_sw_msix = v_actual - 2;
		} else {
			pci_disable_msix(pf->pdev);
			err = -ERANGE;
			goto msix_err;
		}
	}

	return v_actual;

msix_err:
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	goto exit_err;

exit_err:
	pf->num_lan_msix = 0;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
	return err;
}

/**
 * ice_dis_msix - Disable MSI-X interrupt setup in OS
 * @pf: board private structure
 */
static void ice_dis_msix(struct ice_pf *pf)
{
	pci_disable_msix(pf->pdev);
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	pf->msix_entries = NULL;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}

/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_dis_msix(pf);

	if (pf->sw_irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
		pf->sw_irq_tracker = NULL;
	}

	if (pf->hw_irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
		pf->hw_irq_tracker = NULL;
	}
}

/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int vectors = 0, hw_vectors = 0;
	ssize_t size;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		vectors = ice_ena_msix_range(pf);
	else
		return -ENODEV;

	if (vectors < 0)
		return vectors;

	/* set up vector assignment tracking */
	size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);

	pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
	if (!pf->sw_irq_tracker) {
		ice_dis_msix(pf);
		return -ENOMEM;
	}

	/*
populate SW interrupts pool with number of OS granted IRQs. */
	pf->num_avail_sw_msix = vectors;
	pf->sw_irq_tracker->num_entries = vectors;

	/* set up HW vector assignment tracking */
	hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors);

	pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
	if (!pf->hw_irq_tracker) {
		ice_clear_interrupt_scheme(pf);
		return -ENOMEM;
	}

	/* populate HW interrupts pool with number of HW supported irqs. */
	pf->num_avail_hw_msix = hw_vectors;
	pf->hw_irq_tracker->num_entries = hw_vectors;

	return 0;
}

/**
 * ice_verify_itr_gran - verify driver's assumption of ITR granularity
 * @pf: pointer to the PF structure
 *
 * There is no error returned here because the driver will be able to handle a
 * different ITR granularity, but interrupt moderation will not be accurate if
 * the driver's assumptions are not verified. This assumption is made so we can
 * use constants in the hot path instead of accessing structure members.
 */
static void ice_verify_itr_gran(struct ice_pf *pf)
{
	if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
		dev_warn(&pf->pdev->dev,
			 "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
			 (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
}

/**
 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
 * @pf: pointer to the PF structure
 *
 * There is no error returned here because the driver should be able to handle
 * 128 Byte cache lines, so we only print a warning in case issues are seen,
 * specifically with Tx.
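 *
 * The check below reads GLPCI_CNF2; when the CACHELINE_SIZE bit is set the
 * device is configured for 128 Byte cache lines rather than the assumed
 * ICE_CACHE_LINE_BYTES (64).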
2022 */ 2023 static void ice_verify_cacheline_size(struct ice_pf *pf) 2024 { 2025 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 2026 dev_warn(&pf->pdev->dev, 2027 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 2028 ICE_CACHE_LINE_BYTES); 2029 } 2030 2031 /** 2032 * ice_probe - Device initialization routine 2033 * @pdev: PCI device information struct 2034 * @ent: entry in ice_pci_tbl 2035 * 2036 * Returns 0 on success, negative on failure 2037 */ 2038 static int ice_probe(struct pci_dev *pdev, 2039 const struct pci_device_id __always_unused *ent) 2040 { 2041 struct ice_pf *pf; 2042 struct ice_hw *hw; 2043 int err; 2044 2045 /* this driver uses devres, see Documentation/driver-model/devres.txt */ 2046 err = pcim_enable_device(pdev); 2047 if (err) 2048 return err; 2049 2050 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); 2051 if (err) { 2052 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); 2053 return err; 2054 } 2055 2056 pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL); 2057 if (!pf) 2058 return -ENOMEM; 2059 2060 /* set up for high or low dma */ 2061 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2062 if (err) 2063 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2064 if (err) { 2065 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); 2066 return err; 2067 } 2068 2069 pci_enable_pcie_error_reporting(pdev); 2070 pci_set_master(pdev); 2071 2072 pf->pdev = pdev; 2073 pci_set_drvdata(pdev, pf); 2074 set_bit(__ICE_DOWN, pf->state); 2075 /* Disable service task until DOWN bit is cleared */ 2076 set_bit(__ICE_SERVICE_DIS, pf->state); 2077 2078 hw = &pf->hw; 2079 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 2080 hw->back = pf; 2081 hw->vendor_id = pdev->vendor; 2082 hw->device_id = pdev->device; 2083 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 2084 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2085 hw->subsystem_device_id = pdev->subsystem_device; 2086 hw->bus.device = PCI_SLOT(pdev->devfn); 2087 hw->bus.func = PCI_FUNC(pdev->devfn); 2088 ice_set_ctrlq_len(hw); 2089 2090 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 2091 2092 #ifndef CONFIG_DYNAMIC_DEBUG 2093 if (debug < -1) 2094 hw->debug_mask = debug; 2095 #endif 2096 2097 err = ice_init_hw(hw); 2098 if (err) { 2099 dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); 2100 err = -EIO; 2101 goto err_exit_unroll; 2102 } 2103 2104 dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n", 2105 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, 2106 hw->api_maj_ver, hw->api_min_ver); 2107 2108 ice_init_pf(pf); 2109 2110 ice_determine_q_usage(pf); 2111 2112 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 2113 if (!pf->num_alloc_vsi) { 2114 err = -EIO; 2115 goto err_init_pf_unroll; 2116 } 2117 2118 pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, 2119 sizeof(struct ice_vsi *), GFP_KERNEL); 2120 if (!pf->vsi) { 2121 err = -ENOMEM; 2122 goto err_init_pf_unroll; 2123 } 2124 2125 err = ice_init_interrupt_scheme(pf); 2126 if (err) { 2127 dev_err(&pdev->dev, 2128 "ice_init_interrupt_scheme failed: %d\n", err); 2129 err = -EIO; 2130 goto err_init_interrupt_unroll; 2131 } 2132 2133 /* Driver is mostly up */ 2134 clear_bit(__ICE_DOWN, pf->state); 2135 2136 /* In case of MSIX we are going to setup the misc vector right here 2137 * to handle admin queue events etc. In case of legacy and MSI 2138 * the misc functionality and queue processing is combined in 2139 * the same vector and that gets setup at open. 
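	 * Note that ice_init_pf() above always sets ICE_FLAG_MSIX_ENA and
	 * ice_init_interrupt_scheme() returns -ENODEV without it, so the
	 * legacy/MSI case described here is not reachable in this version of
	 * the driver.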
2140 */ 2141 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 2142 err = ice_req_irq_msix_misc(pf); 2143 if (err) { 2144 dev_err(&pdev->dev, 2145 "setup of misc vector failed: %d\n", err); 2146 goto err_init_interrupt_unroll; 2147 } 2148 } 2149 2150 /* create switch struct for the switch element created by FW on boot */ 2151 pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), 2152 GFP_KERNEL); 2153 if (!pf->first_sw) { 2154 err = -ENOMEM; 2155 goto err_msix_misc_unroll; 2156 } 2157 2158 if (hw->evb_veb) 2159 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 2160 else 2161 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 2162 2163 pf->first_sw->pf = pf; 2164 2165 /* record the sw_id available for later use */ 2166 pf->first_sw->sw_id = hw->port_info->sw_id; 2167 2168 err = ice_setup_pf_sw(pf); 2169 if (err) { 2170 dev_err(&pdev->dev, 2171 "probe failed due to setup pf switch:%d\n", err); 2172 goto err_alloc_sw_unroll; 2173 } 2174 2175 clear_bit(__ICE_SERVICE_DIS, pf->state); 2176 2177 /* since everything is good, start the service timer */ 2178 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 2179 2180 ice_verify_cacheline_size(pf); 2181 ice_verify_itr_gran(pf); 2182 2183 return 0; 2184 2185 err_alloc_sw_unroll: 2186 set_bit(__ICE_SERVICE_DIS, pf->state); 2187 set_bit(__ICE_DOWN, pf->state); 2188 devm_kfree(&pf->pdev->dev, pf->first_sw); 2189 err_msix_misc_unroll: 2190 ice_free_irq_msix_misc(pf); 2191 err_init_interrupt_unroll: 2192 ice_clear_interrupt_scheme(pf); 2193 devm_kfree(&pdev->dev, pf->vsi); 2194 err_init_pf_unroll: 2195 ice_deinit_pf(pf); 2196 ice_deinit_hw(hw); 2197 err_exit_unroll: 2198 pci_disable_pcie_error_reporting(pdev); 2199 return err; 2200 } 2201 2202 /** 2203 * ice_remove - Device removal routine 2204 * @pdev: PCI device information struct 2205 */ 2206 static void ice_remove(struct pci_dev *pdev) 2207 { 2208 struct ice_pf *pf = pci_get_drvdata(pdev); 2209 int i; 2210 2211 if (!pf) 2212 return; 2213 2214 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 2215 if (!ice_is_reset_in_progress(pf->state)) 2216 break; 2217 msleep(100); 2218 } 2219 2220 set_bit(__ICE_DOWN, pf->state); 2221 ice_service_task_stop(pf); 2222 2223 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) 2224 ice_free_vfs(pf); 2225 ice_vsi_release_all(pf); 2226 ice_free_irq_msix_misc(pf); 2227 ice_for_each_vsi(pf, i) { 2228 if (!pf->vsi[i]) 2229 continue; 2230 ice_vsi_free_q_vectors(pf->vsi[i]); 2231 } 2232 ice_clear_interrupt_scheme(pf); 2233 ice_deinit_pf(pf); 2234 ice_deinit_hw(&pf->hw); 2235 pci_disable_pcie_error_reporting(pdev); 2236 } 2237 2238 /* ice_pci_tbl - PCI Device ID Table 2239 * 2240 * Wildcard entries (PCI_ANY_ID) should come last 2241 * Last entry must be all 0s 2242 * 2243 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 2244 * Class, Class Mask, private data (not used) } 2245 */ 2246 static const struct pci_device_id ice_pci_tbl[] = { 2247 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 2248 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 2249 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 2250 /* required last entry */ 2251 { 0, } 2252 }; 2253 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 2254 2255 static struct pci_driver ice_driver = { 2256 .name = KBUILD_MODNAME, 2257 .id_table = ice_pci_tbl, 2258 .probe = ice_probe, 2259 .remove = ice_remove, 2260 .sriov_configure = ice_sriov_configure, 2261 }; 2262 2263 /** 2264 * ice_module_init - Driver registration routine 2265 * 2266 * ice_module_init is the first routine called when the driver is 2267 * loaded. 
All it does is register with the PCI subsystem. 2268 */ 2269 static int __init ice_module_init(void) 2270 { 2271 int status; 2272 2273 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); 2274 pr_info("%s\n", ice_copyright); 2275 2276 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 2277 if (!ice_wq) { 2278 pr_err("Failed to create workqueue\n"); 2279 return -ENOMEM; 2280 } 2281 2282 status = pci_register_driver(&ice_driver); 2283 if (status) { 2284 pr_err("failed to register pci driver, err %d\n", status); 2285 destroy_workqueue(ice_wq); 2286 } 2287 2288 return status; 2289 } 2290 module_init(ice_module_init); 2291 2292 /** 2293 * ice_module_exit - Driver exit cleanup routine 2294 * 2295 * ice_module_exit is called just before the driver is removed 2296 * from memory. 2297 */ 2298 static void __exit ice_module_exit(void) 2299 { 2300 pci_unregister_driver(&ice_driver); 2301 destroy_workqueue(ice_wq); 2302 pr_info("module unloaded\n"); 2303 } 2304 module_exit(ice_module_exit); 2305 2306 /** 2307 * ice_set_mac_address - NDO callback to set mac address 2308 * @netdev: network interface device structure 2309 * @pi: pointer to an address structure 2310 * 2311 * Returns 0 on success, negative on failure 2312 */ 2313 static int ice_set_mac_address(struct net_device *netdev, void *pi) 2314 { 2315 struct ice_netdev_priv *np = netdev_priv(netdev); 2316 struct ice_vsi *vsi = np->vsi; 2317 struct ice_pf *pf = vsi->back; 2318 struct ice_hw *hw = &pf->hw; 2319 struct sockaddr *addr = pi; 2320 enum ice_status status; 2321 LIST_HEAD(a_mac_list); 2322 LIST_HEAD(r_mac_list); 2323 u8 flags = 0; 2324 int err; 2325 u8 *mac; 2326 2327 mac = (u8 *)addr->sa_data; 2328 2329 if (!is_valid_ether_addr(mac)) 2330 return -EADDRNOTAVAIL; 2331 2332 if (ether_addr_equal(netdev->dev_addr, mac)) { 2333 netdev_warn(netdev, "already using mac %pM\n", mac); 2334 return 0; 2335 } 2336 2337 if (test_bit(__ICE_DOWN, pf->state) || 2338 ice_is_reset_in_progress(pf->state)) { 2339 netdev_err(netdev, "can't set mac %pM. device not ready\n", 2340 mac); 2341 return -EBUSY; 2342 } 2343 2344 /* When we change the mac address we also have to change the mac address 2345 * based filter rules that were created previously for the old mac 2346 * address. So first, we remove the old filter rule using ice_remove_mac 2347 * and then create a new filter rule using ice_add_mac. Note that for 2348 * both these operations, we first need to form a "list" of mac 2349 * addresses (even though in this case, we have only 1 mac address to be 2350 * added/removed) and this done using ice_add_mac_to_list. Depending on 2351 * the ensuing operation this "list" of mac addresses is either to be 2352 * added or removed from the filter. 2353 */ 2354 err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); 2355 if (err) { 2356 err = -EADDRNOTAVAIL; 2357 goto free_lists; 2358 } 2359 2360 status = ice_remove_mac(hw, &r_mac_list); 2361 if (status) { 2362 err = -EADDRNOTAVAIL; 2363 goto free_lists; 2364 } 2365 2366 err = ice_add_mac_to_list(vsi, &a_mac_list, mac); 2367 if (err) { 2368 err = -EADDRNOTAVAIL; 2369 goto free_lists; 2370 } 2371 2372 status = ice_add_mac(hw, &a_mac_list); 2373 if (status) { 2374 err = -EADDRNOTAVAIL; 2375 goto free_lists; 2376 } 2377 2378 free_lists: 2379 /* free list entries */ 2380 ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); 2381 ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); 2382 2383 if (err) { 2384 netdev_err(netdev, "can't set mac %pM. 
filter update failed\n", 2385 mac); 2386 return err; 2387 } 2388 2389 /* change the netdev's mac address */ 2390 memcpy(netdev->dev_addr, mac, netdev->addr_len); 2391 netdev_dbg(vsi->netdev, "updated mac address to %pM\n", 2392 netdev->dev_addr); 2393 2394 /* write new mac address to the firmware */ 2395 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 2396 status = ice_aq_manage_mac_write(hw, mac, flags, NULL); 2397 if (status) { 2398 netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n", 2399 mac); 2400 } 2401 return 0; 2402 } 2403 2404 /** 2405 * ice_set_rx_mode - NDO callback to set the netdev filters 2406 * @netdev: network interface device structure 2407 */ 2408 static void ice_set_rx_mode(struct net_device *netdev) 2409 { 2410 struct ice_netdev_priv *np = netdev_priv(netdev); 2411 struct ice_vsi *vsi = np->vsi; 2412 2413 if (!vsi) 2414 return; 2415 2416 /* Set the flags to synchronize filters 2417 * ndo_set_rx_mode may be triggered even without a change in netdev 2418 * flags 2419 */ 2420 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 2421 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 2422 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 2423 2424 /* schedule our worker thread which will take care of 2425 * applying the new filter changes 2426 */ 2427 ice_service_task_schedule(vsi->back); 2428 } 2429 2430 /** 2431 * ice_fdb_add - add an entry to the hardware database 2432 * @ndm: the input from the stack 2433 * @tb: pointer to array of nladdr (unused) 2434 * @dev: the net device pointer 2435 * @addr: the MAC address entry being added 2436 * @vid: VLAN id 2437 * @flags: instructions from stack about fdb operation 2438 */ 2439 static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 2440 struct net_device *dev, const unsigned char *addr, 2441 u16 vid, u16 flags, 2442 struct netlink_ext_ack *extack) 2443 { 2444 int err; 2445 2446 if (vid) { 2447 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 2448 return -EINVAL; 2449 } 2450 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 2451 netdev_err(dev, "FDB only supports static addresses\n"); 2452 return -EINVAL; 2453 } 2454 2455 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 2456 err = dev_uc_add_excl(dev, addr); 2457 else if (is_multicast_ether_addr(addr)) 2458 err = dev_mc_add_excl(dev, addr); 2459 else 2460 err = -EINVAL; 2461 2462 /* Only return duplicate errors if NLM_F_EXCL is set */ 2463 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 2464 err = 0; 2465 2466 return err; 2467 } 2468 2469 /** 2470 * ice_fdb_del - delete an entry from the hardware database 2471 * @ndm: the input from the stack 2472 * @tb: pointer to array of nladdr (unused) 2473 * @dev: the net device pointer 2474 * @addr: the MAC address entry being added 2475 * @vid: VLAN id 2476 */ 2477 static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 2478 struct net_device *dev, const unsigned char *addr, 2479 __always_unused u16 vid) 2480 { 2481 int err; 2482 2483 if (ndm->ndm_state & NUD_PERMANENT) { 2484 netdev_err(dev, "FDB only supports static addresses\n"); 2485 return -EINVAL; 2486 } 2487 2488 if (is_unicast_ether_addr(addr)) 2489 err = dev_uc_del(dev, addr); 2490 else if (is_multicast_ether_addr(addr)) 2491 err = dev_mc_del(dev, addr); 2492 else 2493 err = -EINVAL; 2494 2495 return err; 2496 } 2497 2498 /** 2499 * ice_set_features - set the netdev feature flags 2500 * @netdev: ptr to the netdev being adjusted 2501 * @features: the feature set that the stack is 
suggesting 2502 */ 2503 static int ice_set_features(struct net_device *netdev, 2504 netdev_features_t features) 2505 { 2506 struct ice_netdev_priv *np = netdev_priv(netdev); 2507 struct ice_vsi *vsi = np->vsi; 2508 int ret = 0; 2509 2510 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 2511 ret = ice_vsi_manage_rss_lut(vsi, true); 2512 else if (!(features & NETIF_F_RXHASH) && 2513 netdev->features & NETIF_F_RXHASH) 2514 ret = ice_vsi_manage_rss_lut(vsi, false); 2515 2516 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && 2517 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 2518 ret = ice_vsi_manage_vlan_stripping(vsi, true); 2519 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && 2520 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 2521 ret = ice_vsi_manage_vlan_stripping(vsi, false); 2522 else if ((features & NETIF_F_HW_VLAN_CTAG_TX) && 2523 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 2524 ret = ice_vsi_manage_vlan_insertion(vsi); 2525 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && 2526 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 2527 ret = ice_vsi_manage_vlan_insertion(vsi); 2528 2529 return ret; 2530 } 2531 2532 /** 2533 * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI 2534 * @vsi: VSI to setup vlan properties for 2535 */ 2536 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 2537 { 2538 int ret = 0; 2539 2540 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 2541 ret = ice_vsi_manage_vlan_stripping(vsi, true); 2542 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 2543 ret = ice_vsi_manage_vlan_insertion(vsi); 2544 2545 return ret; 2546 } 2547 2548 /** 2549 * ice_vsi_cfg - Setup the VSI 2550 * @vsi: the VSI being configured 2551 * 2552 * Return 0 on success and negative value on error 2553 */ 2554 static int ice_vsi_cfg(struct ice_vsi *vsi) 2555 { 2556 int err; 2557 2558 if (vsi->netdev) { 2559 ice_set_rx_mode(vsi->netdev); 2560 2561 err = ice_vsi_vlan_setup(vsi); 2562 2563 if (err) 2564 return err; 2565 } 2566 2567 err = ice_vsi_cfg_lan_txqs(vsi); 2568 if (!err) 2569 err = ice_vsi_cfg_rxqs(vsi); 2570 2571 return err; 2572 } 2573 2574 /** 2575 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 2576 * @vsi: the VSI being configured 2577 */ 2578 static void ice_napi_enable_all(struct ice_vsi *vsi) 2579 { 2580 int q_idx; 2581 2582 if (!vsi->netdev) 2583 return; 2584 2585 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { 2586 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 2587 2588 if (q_vector->rx.ring || q_vector->tx.ring) 2589 napi_enable(&q_vector->napi); 2590 } 2591 } 2592 2593 /** 2594 * ice_up_complete - Finish the last steps of bringing up a connection 2595 * @vsi: The VSI being configured 2596 * 2597 * Return 0 on success and negative value on error 2598 */ 2599 static int ice_up_complete(struct ice_vsi *vsi) 2600 { 2601 struct ice_pf *pf = vsi->back; 2602 int err; 2603 2604 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 2605 ice_vsi_cfg_msix(vsi); 2606 else 2607 return -ENOTSUPP; 2608 2609 /* Enable only Rx rings, Tx rings were enabled by the FW when the 2610 * Tx queue group list was configured and the context bits were 2611 * programmed using ice_vsi_cfg_txqs 2612 */ 2613 err = ice_vsi_start_rx_rings(vsi); 2614 if (err) 2615 return err; 2616 2617 clear_bit(__ICE_DOWN, vsi->state); 2618 ice_napi_enable_all(vsi); 2619 ice_vsi_ena_irq(vsi); 2620 2621 if (vsi->port_info && 2622 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 2623 vsi->netdev) { 2624 ice_print_link_msg(vsi, true); 2625 
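		/* Link is already up: start the Tx queues and mark the
		 * carrier so the stack can begin transmitting.
		 */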
netif_tx_start_all_queues(vsi->netdev); 2626 netif_carrier_on(vsi->netdev); 2627 } 2628 2629 ice_service_task_schedule(pf); 2630 2631 return err; 2632 } 2633 2634 /** 2635 * ice_up - Bring the connection back up after being down 2636 * @vsi: VSI being configured 2637 */ 2638 int ice_up(struct ice_vsi *vsi) 2639 { 2640 int err; 2641 2642 err = ice_vsi_cfg(vsi); 2643 if (!err) 2644 err = ice_up_complete(vsi); 2645 2646 return err; 2647 } 2648 2649 /** 2650 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 2651 * @ring: Tx or Rx ring to read stats from 2652 * @pkts: packets stats counter 2653 * @bytes: bytes stats counter 2654 * 2655 * This function fetches stats from the ring considering the atomic operations 2656 * that needs to be performed to read u64 values in 32 bit machine. 2657 */ 2658 static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, 2659 u64 *bytes) 2660 { 2661 unsigned int start; 2662 *pkts = 0; 2663 *bytes = 0; 2664 2665 if (!ring) 2666 return; 2667 do { 2668 start = u64_stats_fetch_begin_irq(&ring->syncp); 2669 *pkts = ring->stats.pkts; 2670 *bytes = ring->stats.bytes; 2671 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 2672 } 2673 2674 /** 2675 * ice_update_vsi_ring_stats - Update VSI stats counters 2676 * @vsi: the VSI to be updated 2677 */ 2678 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 2679 { 2680 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 2681 struct ice_ring *ring; 2682 u64 pkts, bytes; 2683 int i; 2684 2685 /* reset netdev stats */ 2686 vsi_stats->tx_packets = 0; 2687 vsi_stats->tx_bytes = 0; 2688 vsi_stats->rx_packets = 0; 2689 vsi_stats->rx_bytes = 0; 2690 2691 /* reset non-netdev (extended) stats */ 2692 vsi->tx_restart = 0; 2693 vsi->tx_busy = 0; 2694 vsi->tx_linearize = 0; 2695 vsi->rx_buf_failed = 0; 2696 vsi->rx_page_failed = 0; 2697 2698 rcu_read_lock(); 2699 2700 /* update Tx rings counters */ 2701 ice_for_each_txq(vsi, i) { 2702 ring = READ_ONCE(vsi->tx_rings[i]); 2703 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 2704 vsi_stats->tx_packets += pkts; 2705 vsi_stats->tx_bytes += bytes; 2706 vsi->tx_restart += ring->tx_stats.restart_q; 2707 vsi->tx_busy += ring->tx_stats.tx_busy; 2708 vsi->tx_linearize += ring->tx_stats.tx_linearize; 2709 } 2710 2711 /* update Rx rings counters */ 2712 ice_for_each_rxq(vsi, i) { 2713 ring = READ_ONCE(vsi->rx_rings[i]); 2714 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 2715 vsi_stats->rx_packets += pkts; 2716 vsi_stats->rx_bytes += bytes; 2717 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 2718 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; 2719 } 2720 2721 rcu_read_unlock(); 2722 } 2723 2724 /** 2725 * ice_update_vsi_stats - Update VSI stats counters 2726 * @vsi: the VSI to be updated 2727 */ 2728 static void ice_update_vsi_stats(struct ice_vsi *vsi) 2729 { 2730 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 2731 struct ice_eth_stats *cur_es = &vsi->eth_stats; 2732 struct ice_pf *pf = vsi->back; 2733 2734 if (test_bit(__ICE_DOWN, vsi->state) || 2735 test_bit(__ICE_CFG_BUSY, pf->state)) 2736 return; 2737 2738 /* get stats as recorded by Tx/Rx rings */ 2739 ice_update_vsi_ring_stats(vsi); 2740 2741 /* get VSI stats as recorded by the hardware */ 2742 ice_update_eth_stats(vsi); 2743 2744 cur_ns->tx_errors = cur_es->tx_errors; 2745 cur_ns->rx_dropped = cur_es->rx_discards; 2746 cur_ns->tx_dropped = cur_es->tx_discards; 2747 cur_ns->multicast = cur_es->rx_multicast; 2748 2749 /* update some more netdev stats if this is 
main VSI */ 2750 if (vsi->type == ICE_VSI_PF) { 2751 cur_ns->rx_crc_errors = pf->stats.crc_errors; 2752 cur_ns->rx_errors = pf->stats.crc_errors + 2753 pf->stats.illegal_bytes; 2754 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 2755 } 2756 } 2757 2758 /** 2759 * ice_update_pf_stats - Update PF port stats counters 2760 * @pf: PF whose stats needs to be updated 2761 */ 2762 static void ice_update_pf_stats(struct ice_pf *pf) 2763 { 2764 struct ice_hw_port_stats *prev_ps, *cur_ps; 2765 struct ice_hw *hw = &pf->hw; 2766 u8 pf_id; 2767 2768 prev_ps = &pf->stats_prev; 2769 cur_ps = &pf->stats; 2770 pf_id = hw->pf_id; 2771 2772 ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), 2773 pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, 2774 &cur_ps->eth.rx_bytes); 2775 2776 ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), 2777 pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, 2778 &cur_ps->eth.rx_unicast); 2779 2780 ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), 2781 pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, 2782 &cur_ps->eth.rx_multicast); 2783 2784 ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), 2785 pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, 2786 &cur_ps->eth.rx_broadcast); 2787 2788 ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), 2789 pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, 2790 &cur_ps->eth.tx_bytes); 2791 2792 ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), 2793 pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, 2794 &cur_ps->eth.tx_unicast); 2795 2796 ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), 2797 pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, 2798 &cur_ps->eth.tx_multicast); 2799 2800 ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), 2801 pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, 2802 &cur_ps->eth.tx_broadcast); 2803 2804 ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, 2805 &prev_ps->tx_dropped_link_down, 2806 &cur_ps->tx_dropped_link_down); 2807 2808 ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), 2809 pf->stat_prev_loaded, &prev_ps->rx_size_64, 2810 &cur_ps->rx_size_64); 2811 2812 ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), 2813 pf->stat_prev_loaded, &prev_ps->rx_size_127, 2814 &cur_ps->rx_size_127); 2815 2816 ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), 2817 pf->stat_prev_loaded, &prev_ps->rx_size_255, 2818 &cur_ps->rx_size_255); 2819 2820 ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), 2821 pf->stat_prev_loaded, &prev_ps->rx_size_511, 2822 &cur_ps->rx_size_511); 2823 2824 ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), 2825 GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, 2826 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 2827 2828 ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), 2829 GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, 2830 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 2831 2832 ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), 2833 GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, 2834 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 2835 2836 ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), 2837 pf->stat_prev_loaded, &prev_ps->tx_size_64, 2838 &cur_ps->tx_size_64); 2839 2840 ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), 2841 pf->stat_prev_loaded, &prev_ps->tx_size_127, 2842 &cur_ps->tx_size_127); 2843 2844 ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), 2845 pf->stat_prev_loaded, &prev_ps->tx_size_255, 2846 
&cur_ps->tx_size_255); 2847 2848 ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), 2849 pf->stat_prev_loaded, &prev_ps->tx_size_511, 2850 &cur_ps->tx_size_511); 2851 2852 ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), 2853 GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, 2854 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 2855 2856 ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), 2857 GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, 2858 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 2859 2860 ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), 2861 GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, 2862 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 2863 2864 ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, 2865 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 2866 2867 ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, 2868 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 2869 2870 ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, 2871 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 2872 2873 ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, 2874 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 2875 2876 ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, 2877 &prev_ps->crc_errors, &cur_ps->crc_errors); 2878 2879 ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, 2880 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 2881 2882 ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, 2883 &prev_ps->mac_local_faults, 2884 &cur_ps->mac_local_faults); 2885 2886 ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, 2887 &prev_ps->mac_remote_faults, 2888 &cur_ps->mac_remote_faults); 2889 2890 ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, 2891 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 2892 2893 ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, 2894 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 2895 2896 ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, 2897 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 2898 2899 ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, 2900 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 2901 2902 ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, 2903 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 2904 2905 pf->stat_prev_loaded = true; 2906 } 2907 2908 /** 2909 * ice_get_stats64 - get statistics for network device structure 2910 * @netdev: network interface device structure 2911 * @stats: main device statistics structure 2912 */ 2913 static 2914 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 2915 { 2916 struct ice_netdev_priv *np = netdev_priv(netdev); 2917 struct rtnl_link_stats64 *vsi_stats; 2918 struct ice_vsi *vsi = np->vsi; 2919 2920 vsi_stats = &vsi->net_stats; 2921 2922 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) 2923 return; 2924 /* netdev packet/byte stats come from ring counter. These are obtained 2925 * by summing up ring counters (done by ice_update_vsi_ring_stats). 2926 */ 2927 ice_update_vsi_ring_stats(vsi); 2928 stats->tx_packets = vsi_stats->tx_packets; 2929 stats->tx_bytes = vsi_stats->tx_bytes; 2930 stats->rx_packets = vsi_stats->rx_packets; 2931 stats->rx_bytes = vsi_stats->rx_bytes; 2932 2933 /* The rest of the stats can be read from the hardware but instead we 2934 * just return values that the watchdog task has already obtained from 2935 * the hardware. 
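	 * (That watchdog work is done by ice_update_vsi_stats() and
	 * ice_update_pf_stats() above, driven from the periodic service task.)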
2936 */ 2937 stats->multicast = vsi_stats->multicast; 2938 stats->tx_errors = vsi_stats->tx_errors; 2939 stats->tx_dropped = vsi_stats->tx_dropped; 2940 stats->rx_errors = vsi_stats->rx_errors; 2941 stats->rx_dropped = vsi_stats->rx_dropped; 2942 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 2943 stats->rx_length_errors = vsi_stats->rx_length_errors; 2944 } 2945 2946 /** 2947 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 2948 * @vsi: VSI having NAPI disabled 2949 */ 2950 static void ice_napi_disable_all(struct ice_vsi *vsi) 2951 { 2952 int q_idx; 2953 2954 if (!vsi->netdev) 2955 return; 2956 2957 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { 2958 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 2959 2960 if (q_vector->rx.ring || q_vector->tx.ring) 2961 napi_disable(&q_vector->napi); 2962 } 2963 } 2964 2965 /** 2966 * ice_force_phys_link_state - Force the physical link state 2967 * @vsi: VSI to force the physical link state to up/down 2968 * @link_up: true/false indicates to set the physical link to up/down 2969 * 2970 * Force the physical link state by getting the current PHY capabilities from 2971 * hardware and setting the PHY config based on the determined capabilities. If 2972 * link changes a link event will be triggered because both the Enable Automatic 2973 * Link Update and LESM Enable bits are set when setting the PHY capabilities. 2974 * 2975 * Returns 0 on success, negative on failure 2976 */ 2977 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) 2978 { 2979 struct ice_aqc_get_phy_caps_data *pcaps; 2980 struct ice_aqc_set_phy_cfg_data *cfg; 2981 struct ice_port_info *pi; 2982 struct device *dev; 2983 int retcode; 2984 2985 if (!vsi || !vsi->port_info || !vsi->back) 2986 return -EINVAL; 2987 if (vsi->type != ICE_VSI_PF) 2988 return 0; 2989 2990 dev = &vsi->back->pdev->dev; 2991 2992 pi = vsi->port_info; 2993 2994 pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); 2995 if (!pcaps) 2996 return -ENOMEM; 2997 2998 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 2999 NULL); 3000 if (retcode) { 3001 dev_err(dev, 3002 "Failed to get phy capabilities, VSI %d error %d\n", 3003 vsi->vsi_num, retcode); 3004 retcode = -EIO; 3005 goto out; 3006 } 3007 3008 /* No change in link */ 3009 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 3010 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 3011 goto out; 3012 3013 cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); 3014 if (!cfg) { 3015 retcode = -ENOMEM; 3016 goto out; 3017 } 3018 3019 cfg->phy_type_low = pcaps->phy_type_low; 3020 cfg->phy_type_high = pcaps->phy_type_high; 3021 cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3022 cfg->low_power_ctrl = pcaps->low_power_ctrl; 3023 cfg->eee_cap = pcaps->eee_cap; 3024 cfg->eeer_value = pcaps->eeer_value; 3025 cfg->link_fec_opt = pcaps->link_fec_options; 3026 if (link_up) 3027 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 3028 else 3029 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 3030 3031 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); 3032 if (retcode) { 3033 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 3034 vsi->vsi_num, retcode); 3035 retcode = -EIO; 3036 } 3037 3038 devm_kfree(dev, cfg); 3039 out: 3040 devm_kfree(dev, pcaps); 3041 return retcode; 3042 } 3043 3044 /** 3045 * ice_down - Shutdown the connection 3046 * @vsi: The VSI being stopped 3047 */ 3048 int ice_down(struct ice_vsi *vsi) 3049 { 3050 int i, tx_err, rx_err, link_err = 0; 3051 3052 /* Caller of this 
function is expected to set the 3053 * vsi->state __ICE_DOWN bit 3054 */ 3055 if (vsi->netdev) { 3056 netif_carrier_off(vsi->netdev); 3057 netif_tx_disable(vsi->netdev); 3058 } 3059 3060 ice_vsi_dis_irq(vsi); 3061 3062 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 3063 if (tx_err) 3064 netdev_err(vsi->netdev, 3065 "Failed stop Tx rings, VSI %d error %d\n", 3066 vsi->vsi_num, tx_err); 3067 3068 rx_err = ice_vsi_stop_rx_rings(vsi); 3069 if (rx_err) 3070 netdev_err(vsi->netdev, 3071 "Failed stop Rx rings, VSI %d error %d\n", 3072 vsi->vsi_num, rx_err); 3073 3074 ice_napi_disable_all(vsi); 3075 3076 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { 3077 link_err = ice_force_phys_link_state(vsi, false); 3078 if (link_err) 3079 netdev_err(vsi->netdev, 3080 "Failed to set physical link down, VSI %d error %d\n", 3081 vsi->vsi_num, link_err); 3082 } 3083 3084 ice_for_each_txq(vsi, i) 3085 ice_clean_tx_ring(vsi->tx_rings[i]); 3086 3087 ice_for_each_rxq(vsi, i) 3088 ice_clean_rx_ring(vsi->rx_rings[i]); 3089 3090 if (tx_err || rx_err || link_err) { 3091 netdev_err(vsi->netdev, 3092 "Failed to close VSI 0x%04X on switch 0x%04X\n", 3093 vsi->vsi_num, vsi->vsw->sw_id); 3094 return -EIO; 3095 } 3096 3097 return 0; 3098 } 3099 3100 /** 3101 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 3102 * @vsi: VSI having resources allocated 3103 * 3104 * Return 0 on success, negative on failure 3105 */ 3106 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 3107 { 3108 int i, err = 0; 3109 3110 if (!vsi->num_txq) { 3111 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", 3112 vsi->vsi_num); 3113 return -EINVAL; 3114 } 3115 3116 ice_for_each_txq(vsi, i) { 3117 vsi->tx_rings[i]->netdev = vsi->netdev; 3118 err = ice_setup_tx_ring(vsi->tx_rings[i]); 3119 if (err) 3120 break; 3121 } 3122 3123 return err; 3124 } 3125 3126 /** 3127 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 3128 * @vsi: VSI having resources allocated 3129 * 3130 * Return 0 on success, negative on failure 3131 */ 3132 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 3133 { 3134 int i, err = 0; 3135 3136 if (!vsi->num_rxq) { 3137 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", 3138 vsi->vsi_num); 3139 return -EINVAL; 3140 } 3141 3142 ice_for_each_rxq(vsi, i) { 3143 vsi->rx_rings[i]->netdev = vsi->netdev; 3144 err = ice_setup_rx_ring(vsi->rx_rings[i]); 3145 if (err) 3146 break; 3147 } 3148 3149 return err; 3150 } 3151 3152 /** 3153 * ice_vsi_req_irq - Request IRQ from the OS 3154 * @vsi: The VSI IRQ is being requested for 3155 * @basename: name for the vector 3156 * 3157 * Return 0 on success and a negative value on error 3158 */ 3159 static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) 3160 { 3161 struct ice_pf *pf = vsi->back; 3162 int err = -EINVAL; 3163 3164 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3165 err = ice_vsi_req_irq_msix(vsi, basename); 3166 3167 return err; 3168 } 3169 3170 /** 3171 * ice_vsi_open - Called when a network interface is made active 3172 * @vsi: the VSI to open 3173 * 3174 * Initialization of the VSI 3175 * 3176 * Returns 0 on success, negative value on error 3177 */ 3178 static int ice_vsi_open(struct ice_vsi *vsi) 3179 { 3180 char int_name[ICE_INT_NAME_STR_LEN]; 3181 struct ice_pf *pf = vsi->back; 3182 int err; 3183 3184 /* allocate descriptors */ 3185 err = ice_vsi_setup_tx_rings(vsi); 3186 if (err) 3187 goto err_setup_tx; 3188 3189 err = ice_vsi_setup_rx_rings(vsi); 3190 if (err) 3191 goto err_setup_rx; 3192 3193 err = 
ice_vsi_cfg(vsi); 3194 if (err) 3195 goto err_setup_rx; 3196 3197 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 3198 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); 3199 err = ice_vsi_req_irq(vsi, int_name); 3200 if (err) 3201 goto err_setup_rx; 3202 3203 /* Notify the stack of the actual queue counts. */ 3204 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 3205 if (err) 3206 goto err_set_qs; 3207 3208 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 3209 if (err) 3210 goto err_set_qs; 3211 3212 err = ice_up_complete(vsi); 3213 if (err) 3214 goto err_up_complete; 3215 3216 return 0; 3217 3218 err_up_complete: 3219 ice_down(vsi); 3220 err_set_qs: 3221 ice_vsi_free_irq(vsi); 3222 err_setup_rx: 3223 ice_vsi_free_rx_rings(vsi); 3224 err_setup_tx: 3225 ice_vsi_free_tx_rings(vsi); 3226 3227 return err; 3228 } 3229 3230 /** 3231 * ice_vsi_release_all - Delete all VSIs 3232 * @pf: PF from which all VSIs are being removed 3233 */ 3234 static void ice_vsi_release_all(struct ice_pf *pf) 3235 { 3236 int err, i; 3237 3238 if (!pf->vsi) 3239 return; 3240 3241 for (i = 0; i < pf->num_alloc_vsi; i++) { 3242 if (!pf->vsi[i]) 3243 continue; 3244 3245 err = ice_vsi_release(pf->vsi[i]); 3246 if (err) 3247 dev_dbg(&pf->pdev->dev, 3248 "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 3249 i, err, pf->vsi[i]->vsi_num); 3250 } 3251 } 3252 3253 /** 3254 * ice_dis_vsi - pause a VSI 3255 * @vsi: the VSI being paused 3256 * @locked: is the rtnl_lock already held 3257 */ 3258 static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 3259 { 3260 if (test_bit(__ICE_DOWN, vsi->state)) 3261 return; 3262 3263 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3264 3265 if (vsi->type == ICE_VSI_PF && vsi->netdev) { 3266 if (netif_running(vsi->netdev)) { 3267 if (!locked) { 3268 rtnl_lock(); 3269 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3270 rtnl_unlock(); 3271 } else { 3272 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3273 } 3274 } else { 3275 ice_vsi_close(vsi); 3276 } 3277 } 3278 } 3279 3280 /** 3281 * ice_ena_vsi - resume a VSI 3282 * @vsi: the VSI being resume 3283 */ 3284 static int ice_ena_vsi(struct ice_vsi *vsi) 3285 { 3286 int err = 0; 3287 3288 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && 3289 vsi->netdev) { 3290 if (netif_running(vsi->netdev)) { 3291 rtnl_lock(); 3292 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3293 rtnl_unlock(); 3294 } else { 3295 err = ice_vsi_open(vsi); 3296 } 3297 } 3298 3299 return err; 3300 } 3301 3302 /** 3303 * ice_pf_dis_all_vsi - Pause all VSIs on a PF 3304 * @pf: the PF 3305 */ 3306 static void ice_pf_dis_all_vsi(struct ice_pf *pf) 3307 { 3308 int v; 3309 3310 ice_for_each_vsi(pf, v) 3311 if (pf->vsi[v]) 3312 ice_dis_vsi(pf->vsi[v], false); 3313 } 3314 3315 /** 3316 * ice_pf_ena_all_vsi - Resume all VSIs on a PF 3317 * @pf: the PF 3318 */ 3319 static int ice_pf_ena_all_vsi(struct ice_pf *pf) 3320 { 3321 int v; 3322 3323 ice_for_each_vsi(pf, v) 3324 if (pf->vsi[v]) 3325 if (ice_ena_vsi(pf->vsi[v])) 3326 return -EIO; 3327 3328 return 0; 3329 } 3330 3331 /** 3332 * ice_vsi_rebuild_all - rebuild all VSIs in pf 3333 * @pf: the PF 3334 */ 3335 static int ice_vsi_rebuild_all(struct ice_pf *pf) 3336 { 3337 int i; 3338 3339 /* loop through pf->vsi array and reinit the VSI if found */ 3340 for (i = 0; i < pf->num_alloc_vsi; i++) { 3341 int err; 3342 3343 if (!pf->vsi[i]) 3344 continue; 3345 3346 /* VF VSI rebuild isn't supported yet */ 3347 if (pf->vsi[i]->type == ICE_VSI_VF) 3348 continue; 3349 3350 err = 
ice_vsi_rebuild(pf->vsi[i]); 3351 if (err) { 3352 dev_err(&pf->pdev->dev, 3353 "VSI at index %d rebuild failed\n", 3354 pf->vsi[i]->idx); 3355 return err; 3356 } 3357 3358 dev_info(&pf->pdev->dev, 3359 "VSI at index %d rebuilt. vsi_num = 0x%x\n", 3360 pf->vsi[i]->idx, pf->vsi[i]->vsi_num); 3361 } 3362 3363 return 0; 3364 } 3365 3366 /** 3367 * ice_vsi_replay_all - replay all VSIs configuration in the PF 3368 * @pf: the PF 3369 */ 3370 static int ice_vsi_replay_all(struct ice_pf *pf) 3371 { 3372 struct ice_hw *hw = &pf->hw; 3373 enum ice_status ret; 3374 int i; 3375 3376 /* loop through pf->vsi array and replay the VSI if found */ 3377 for (i = 0; i < pf->num_alloc_vsi; i++) { 3378 if (!pf->vsi[i]) 3379 continue; 3380 3381 ret = ice_replay_vsi(hw, pf->vsi[i]->idx); 3382 if (ret) { 3383 dev_err(&pf->pdev->dev, 3384 "VSI at index %d replay failed %d\n", 3385 pf->vsi[i]->idx, ret); 3386 return -EIO; 3387 } 3388 3389 /* Re-map HW VSI number, using VSI handle that has been 3390 * previously validated in ice_replay_vsi() call above 3391 */ 3392 pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); 3393 3394 dev_info(&pf->pdev->dev, 3395 "VSI at index %d filter replayed successfully - vsi_num %i\n", 3396 pf->vsi[i]->idx, pf->vsi[i]->vsi_num); 3397 } 3398 3399 /* Clean up replay filter after successful re-configuration */ 3400 ice_replay_post(hw); 3401 return 0; 3402 } 3403 3404 /** 3405 * ice_rebuild - rebuild after reset 3406 * @pf: pf to rebuild 3407 */ 3408 static void ice_rebuild(struct ice_pf *pf) 3409 { 3410 struct device *dev = &pf->pdev->dev; 3411 struct ice_hw *hw = &pf->hw; 3412 enum ice_status ret; 3413 int err, i; 3414 3415 if (test_bit(__ICE_DOWN, pf->state)) 3416 goto clear_recovery; 3417 3418 dev_dbg(dev, "rebuilding pf\n"); 3419 3420 ret = ice_init_all_ctrlq(hw); 3421 if (ret) { 3422 dev_err(dev, "control queues init failed %d\n", ret); 3423 goto err_init_ctrlq; 3424 } 3425 3426 ret = ice_clear_pf_cfg(hw); 3427 if (ret) { 3428 dev_err(dev, "clear PF configuration failed %d\n", ret); 3429 goto err_init_ctrlq; 3430 } 3431 3432 ice_clear_pxe_mode(hw); 3433 3434 ret = ice_get_caps(hw); 3435 if (ret) { 3436 dev_err(dev, "ice_get_caps failed %d\n", ret); 3437 goto err_init_ctrlq; 3438 } 3439 3440 err = ice_sched_init_port(hw->port_info); 3441 if (err) 3442 goto err_sched_init_port; 3443 3444 /* reset search_hint of irq_trackers to 0 since interrupts are 3445 * reclaimed and could be allocated from beginning during VSI rebuild 3446 */ 3447 pf->sw_irq_tracker->search_hint = 0; 3448 pf->hw_irq_tracker->search_hint = 0; 3449 3450 err = ice_vsi_rebuild_all(pf); 3451 if (err) { 3452 dev_err(dev, "ice_vsi_rebuild_all failed\n"); 3453 goto err_vsi_rebuild; 3454 } 3455 3456 err = ice_update_link_info(hw->port_info); 3457 if (err) 3458 dev_err(&pf->pdev->dev, "Get link status error %d\n", err); 3459 3460 /* Replay all VSIs Configuration, including filters after reset */ 3461 if (ice_vsi_replay_all(pf)) { 3462 dev_err(&pf->pdev->dev, 3463 "error replaying VSI configurations with switch filter rules\n"); 3464 goto err_vsi_rebuild; 3465 } 3466 3467 /* start misc vector */ 3468 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 3469 err = ice_req_irq_msix_misc(pf); 3470 if (err) { 3471 dev_err(dev, "misc vector setup failed: %d\n", err); 3472 goto err_vsi_rebuild; 3473 } 3474 } 3475 3476 /* restart the VSIs that were rebuilt and running before the reset */ 3477 err = ice_pf_ena_all_vsi(pf); 3478 if (err) { 3479 dev_err(&pf->pdev->dev, "error enabling VSIs\n"); 3480 /* no need to disable VSIs in 
tear down path in ice_rebuild()
		 * since it's already taken care of in ice_vsi_open()
		 */
		goto err_vsi_rebuild;
	}

	ice_reset_all_vfs(pf, true);

	for (i = 0; i < pf->num_alloc_vsi; i++) {
		bool link_up;

		if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
			continue;
		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
		if (link_up) {
			netif_carrier_on(pf->vsi[i]->netdev);
			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
		} else {
			netif_carrier_off(pf->vsi[i]->netdev);
			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
		}
	}

	/* if we get here, reset flow is successful */
	clear_bit(__ICE_RESET_FAILED, pf->state);
	return;

err_vsi_rebuild:
	ice_vsi_release_all(pf);
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw);
	set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(__ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}

/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 count = 0;

	if (new_mtu == netdev->mtu) {
		netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
		return 0;
	}

	if (new_mtu < netdev->min_mtu) {
		netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
			   netdev->min_mtu);
		return -EINVAL;
	} else if (new_mtu > netdev->max_mtu) {
		netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
			   netdev->max_mtu);
		return -EINVAL;
	}
	/* if a reset is in progress, wait for some time for it to complete */
	do {
		if (ice_is_reset_in_progress(pf->state)) {
			count++;
			usleep_range(1000, 2000);
		} else {
			break;
		}

	} while (count < 100);

	if (count == 100) {
		netdev_err(netdev, "can't change mtu. 
Device is busy\n"); 3561 return -EBUSY; 3562 } 3563 3564 netdev->mtu = new_mtu; 3565 3566 /* if VSI is up, bring it down and then back up */ 3567 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { 3568 int err; 3569 3570 err = ice_down(vsi); 3571 if (err) { 3572 netdev_err(netdev, "change mtu if_up err %d\n", err); 3573 return err; 3574 } 3575 3576 err = ice_up(vsi); 3577 if (err) { 3578 netdev_err(netdev, "change mtu if_up err %d\n", err); 3579 return err; 3580 } 3581 } 3582 3583 netdev_dbg(netdev, "changed mtu to %d\n", new_mtu); 3584 return 0; 3585 } 3586 3587 /** 3588 * ice_set_rss - Set RSS keys and lut 3589 * @vsi: Pointer to VSI structure 3590 * @seed: RSS hash seed 3591 * @lut: Lookup table 3592 * @lut_size: Lookup table size 3593 * 3594 * Returns 0 on success, negative on failure 3595 */ 3596 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 3597 { 3598 struct ice_pf *pf = vsi->back; 3599 struct ice_hw *hw = &pf->hw; 3600 enum ice_status status; 3601 3602 if (seed) { 3603 struct ice_aqc_get_set_rss_keys *buf = 3604 (struct ice_aqc_get_set_rss_keys *)seed; 3605 3606 status = ice_aq_set_rss_key(hw, vsi->idx, buf); 3607 3608 if (status) { 3609 dev_err(&pf->pdev->dev, 3610 "Cannot set RSS key, err %d aq_err %d\n", 3611 status, hw->adminq.rq_last_status); 3612 return -EIO; 3613 } 3614 } 3615 3616 if (lut) { 3617 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 3618 lut, lut_size); 3619 if (status) { 3620 dev_err(&pf->pdev->dev, 3621 "Cannot set RSS lut, err %d aq_err %d\n", 3622 status, hw->adminq.rq_last_status); 3623 return -EIO; 3624 } 3625 } 3626 3627 return 0; 3628 } 3629 3630 /** 3631 * ice_get_rss - Get RSS keys and lut 3632 * @vsi: Pointer to VSI structure 3633 * @seed: Buffer to store the keys 3634 * @lut: Buffer to store the lookup table entries 3635 * @lut_size: Size of buffer to store the lookup table entries 3636 * 3637 * Returns 0 on success, negative on failure 3638 */ 3639 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 3640 { 3641 struct ice_pf *pf = vsi->back; 3642 struct ice_hw *hw = &pf->hw; 3643 enum ice_status status; 3644 3645 if (seed) { 3646 struct ice_aqc_get_set_rss_keys *buf = 3647 (struct ice_aqc_get_set_rss_keys *)seed; 3648 3649 status = ice_aq_get_rss_key(hw, vsi->idx, buf); 3650 if (status) { 3651 dev_err(&pf->pdev->dev, 3652 "Cannot get RSS key, err %d aq_err %d\n", 3653 status, hw->adminq.rq_last_status); 3654 return -EIO; 3655 } 3656 } 3657 3658 if (lut) { 3659 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 3660 lut, lut_size); 3661 if (status) { 3662 dev_err(&pf->pdev->dev, 3663 "Cannot get RSS lut, err %d aq_err %d\n", 3664 status, hw->adminq.rq_last_status); 3665 return -EIO; 3666 } 3667 } 3668 3669 return 0; 3670 } 3671 3672 /** 3673 * ice_bridge_getlink - Get the hardware bridge mode 3674 * @skb: skb buff 3675 * @pid: process id 3676 * @seq: RTNL message seq 3677 * @dev: the netdev being configured 3678 * @filter_mask: filter mask passed in 3679 * @nlflags: netlink flags passed in 3680 * 3681 * Return the bridge mode (VEB/VEPA) 3682 */ 3683 static int 3684 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 3685 struct net_device *dev, u32 filter_mask, int nlflags) 3686 { 3687 struct ice_netdev_priv *np = netdev_priv(dev); 3688 struct ice_vsi *vsi = np->vsi; 3689 struct ice_pf *pf = vsi->back; 3690 u16 bmode; 3691 3692 bmode = pf->first_sw->bridge_mode; 3693 3694 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 3695 filter_mask, NULL); 3696 } 3697 
/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx ctxt = { 0 };
	enum ice_status status;

	vsi_props = &vsi->info;
	ctxt.info = vsi->info;

	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (status) {
		dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
			bmode, status, hw->adminq.sq_last_status);
		return -EIO;
	}
	/* Update sw flags for bookkeeping */
	vsi_props->sw_flags = ctxt.info.sw_flags;

	return 0;
}

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
 * not already set) for all VSIs connected to this switch, and also updates the
 * unicast switch filter rules for the corresponding switch of the netdev.
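 *
 * From user space this path is typically exercised through iproute2, e.g.
 * "bridge link set dev <pf-netdev> hwmode vepa" (illustrative example).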
3747 */ 3748 static int 3749 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 3750 u16 __always_unused flags, 3751 struct netlink_ext_ack __always_unused *extack) 3752 { 3753 struct ice_netdev_priv *np = netdev_priv(dev); 3754 struct ice_pf *pf = np->vsi->back; 3755 struct nlattr *attr, *br_spec; 3756 struct ice_hw *hw = &pf->hw; 3757 enum ice_status status; 3758 struct ice_sw *pf_sw; 3759 int rem, v, err = 0; 3760 3761 pf_sw = pf->first_sw; 3762 /* find the attribute in the netlink message */ 3763 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 3764 3765 nla_for_each_nested(attr, br_spec, rem) { 3766 __u16 mode; 3767 3768 if (nla_type(attr) != IFLA_BRIDGE_MODE) 3769 continue; 3770 mode = nla_get_u16(attr); 3771 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 3772 return -EINVAL; 3773 /* Continue if bridge mode is not being flipped */ 3774 if (mode == pf_sw->bridge_mode) 3775 continue; 3776 /* Iterates through the PF VSI list and update the loopback 3777 * mode of the VSI 3778 */ 3779 ice_for_each_vsi(pf, v) { 3780 if (!pf->vsi[v]) 3781 continue; 3782 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 3783 if (err) 3784 return err; 3785 } 3786 3787 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 3788 /* Update the unicast switch filter rules for the corresponding 3789 * switch of the netdev 3790 */ 3791 status = ice_update_sw_rule_bridge_mode(hw); 3792 if (status) { 3793 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n", 3794 mode, status, hw->adminq.sq_last_status); 3795 /* revert hw->evb_veb */ 3796 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 3797 return -EIO; 3798 } 3799 3800 pf_sw->bridge_mode = mode; 3801 } 3802 3803 return 0; 3804 } 3805 3806 /** 3807 * ice_tx_timeout - Respond to a Tx Hang 3808 * @netdev: network interface device structure 3809 */ 3810 static void ice_tx_timeout(struct net_device *netdev) 3811 { 3812 struct ice_netdev_priv *np = netdev_priv(netdev); 3813 struct ice_ring *tx_ring = NULL; 3814 struct ice_vsi *vsi = np->vsi; 3815 struct ice_pf *pf = vsi->back; 3816 int hung_queue = -1; 3817 u32 i; 3818 3819 pf->tx_timeout_count++; 3820 3821 /* find the stopped queue the same way dev_watchdog() does */ 3822 for (i = 0; i < netdev->num_tx_queues; i++) { 3823 unsigned long trans_start; 3824 struct netdev_queue *q; 3825 3826 q = netdev_get_tx_queue(netdev, i); 3827 trans_start = q->trans_start; 3828 if (netif_xmit_stopped(q) && 3829 time_after(jiffies, 3830 trans_start + netdev->watchdog_timeo)) { 3831 hung_queue = i; 3832 break; 3833 } 3834 } 3835 3836 if (i == netdev->num_tx_queues) 3837 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); 3838 else 3839 /* now that we have an index, find the tx_ring struct */ 3840 for (i = 0; i < vsi->num_txq; i++) 3841 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 3842 if (hung_queue == vsi->tx_rings[i]->q_index) { 3843 tx_ring = vsi->tx_rings[i]; 3844 break; 3845 } 3846 3847 /* Reset recovery level if enough time has elapsed after last timeout. 3848 * Also ensure no new reset action happens before next timeout period. 
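	 * Recovery escalates with tx_timeout_recovery_level: level 1 requests
	 * a PFR, level 2 a CORER and level 3 a GLOBR; anything beyond that
	 * marks the device down as unrecoverable.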
3849 */ 3850 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 3851 pf->tx_timeout_recovery_level = 1; 3852 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 3853 netdev->watchdog_timeo))) 3854 return; 3855 3856 if (tx_ring) { 3857 struct ice_hw *hw = &pf->hw; 3858 u32 head, val = 0; 3859 3860 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & 3861 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; 3862 /* Read interrupt register */ 3863 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3864 val = rd32(hw, 3865 GLINT_DYN_CTL(tx_ring->q_vector->v_idx + 3866 tx_ring->vsi->hw_base_vector)); 3867 3868 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 3869 vsi->vsi_num, hung_queue, tx_ring->next_to_clean, 3870 head, tx_ring->next_to_use, val); 3871 } 3872 3873 pf->tx_timeout_last_recovery = jiffies; 3874 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", 3875 pf->tx_timeout_recovery_level, hung_queue); 3876 3877 switch (pf->tx_timeout_recovery_level) { 3878 case 1: 3879 set_bit(__ICE_PFR_REQ, pf->state); 3880 break; 3881 case 2: 3882 set_bit(__ICE_CORER_REQ, pf->state); 3883 break; 3884 case 3: 3885 set_bit(__ICE_GLOBR_REQ, pf->state); 3886 break; 3887 default: 3888 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 3889 set_bit(__ICE_DOWN, pf->state); 3890 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3891 set_bit(__ICE_SERVICE_DIS, pf->state); 3892 break; 3893 } 3894 3895 ice_service_task_schedule(pf); 3896 pf->tx_timeout_recovery_level++; 3897 } 3898 3899 /** 3900 * ice_open - Called when a network interface becomes active 3901 * @netdev: network interface device structure 3902 * 3903 * The open entry point is called when a network interface is made 3904 * active by the system (IFF_UP). At this point all resources needed 3905 * for transmit and receive operations are allocated, the interrupt 3906 * handler is registered with the OS, the netdev watchdog is enabled, 3907 * and the stack is notified that the interface is ready. 3908 * 3909 * Returns 0 on success, negative value on failure 3910 */ 3911 static int ice_open(struct net_device *netdev) 3912 { 3913 struct ice_netdev_priv *np = netdev_priv(netdev); 3914 struct ice_vsi *vsi = np->vsi; 3915 int err; 3916 3917 if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { 3918 netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 3919 return -EIO; 3920 } 3921 3922 netif_carrier_off(netdev); 3923 3924 err = ice_force_phys_link_state(vsi, true); 3925 if (err) { 3926 netdev_err(netdev, 3927 "Failed to set physical link up, error %d\n", err); 3928 return err; 3929 } 3930 3931 err = ice_vsi_open(vsi); 3932 if (err) 3933 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 3934 vsi->vsi_num, vsi->vsw->sw_id); 3935 return err; 3936 } 3937 3938 /** 3939 * ice_stop - Disables a network interface 3940 * @netdev: network interface device structure 3941 * 3942 * The stop entry point is called when an interface is de-activated by the OS, 3943 * and the netdevice enters the DOWN state. The hardware is still under the 3944 * driver's control, but the netdev interface is disabled. 
3945 * 3946 * Returns success only - not allowed to fail 3947 */ 3948 static int ice_stop(struct net_device *netdev) 3949 { 3950 struct ice_netdev_priv *np = netdev_priv(netdev); 3951 struct ice_vsi *vsi = np->vsi; 3952 3953 ice_vsi_close(vsi); 3954 3955 return 0; 3956 } 3957 3958 /** 3959 * ice_features_check - Validate encapsulated packet conforms to limits 3960 * @skb: skb buffer 3961 * @netdev: This port's netdev 3962 * @features: Offload features that the stack believes apply 3963 */ 3964 static netdev_features_t 3965 ice_features_check(struct sk_buff *skb, 3966 struct net_device __always_unused *netdev, 3967 netdev_features_t features) 3968 { 3969 size_t len; 3970 3971 /* No point in doing any of this if neither checksum nor GSO are 3972 * being requested for this frame. We can rule out both by just 3973 * checking for CHECKSUM_PARTIAL 3974 */ 3975 if (skb->ip_summed != CHECKSUM_PARTIAL) 3976 return features; 3977 3978 /* We cannot support GSO if the MSS is going to be less than 3979 * 64 bytes. If it is then we need to drop support for GSO. 3980 */ 3981 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3982 features &= ~NETIF_F_GSO_MASK; 3983 3984 len = skb_network_header(skb) - skb->data; 3985 if (len & ~(ICE_TXD_MACLEN_MAX)) 3986 goto out_rm_features; 3987 3988 len = skb_transport_header(skb) - skb_network_header(skb); 3989 if (len & ~(ICE_TXD_IPLEN_MAX)) 3990 goto out_rm_features; 3991 3992 if (skb->encapsulation) { 3993 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3994 if (len & ~(ICE_TXD_L4LEN_MAX)) 3995 goto out_rm_features; 3996 3997 len = skb_inner_transport_header(skb) - 3998 skb_inner_network_header(skb); 3999 if (len & ~(ICE_TXD_IPLEN_MAX)) 4000 goto out_rm_features; 4001 } 4002 4003 return features; 4004 out_rm_features: 4005 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 4006 } 4007 4008 static const struct net_device_ops ice_netdev_ops = { 4009 .ndo_open = ice_open, 4010 .ndo_stop = ice_stop, 4011 .ndo_start_xmit = ice_start_xmit, 4012 .ndo_features_check = ice_features_check, 4013 .ndo_set_rx_mode = ice_set_rx_mode, 4014 .ndo_set_mac_address = ice_set_mac_address, 4015 .ndo_validate_addr = eth_validate_addr, 4016 .ndo_change_mtu = ice_change_mtu, 4017 .ndo_get_stats64 = ice_get_stats64, 4018 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 4019 .ndo_set_vf_mac = ice_set_vf_mac, 4020 .ndo_get_vf_config = ice_get_vf_cfg, 4021 .ndo_set_vf_trust = ice_set_vf_trust, 4022 .ndo_set_vf_vlan = ice_set_vf_port_vlan, 4023 .ndo_set_vf_link_state = ice_set_vf_link_state, 4024 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 4025 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 4026 .ndo_set_features = ice_set_features, 4027 .ndo_bridge_getlink = ice_bridge_getlink, 4028 .ndo_bridge_setlink = ice_bridge_setlink, 4029 .ndo_fdb_add = ice_fdb_add, 4030 .ndo_fdb_del = ice_fdb_del, 4031 .ndo_tx_timeout = ice_tx_timeout, 4032 }; 4033
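
/* Illustrative sketch (not part of the driver): ice_netdev_ops above is
 * attached to the PF netdev when the netdev is created, roughly
 *
 *	netdev->netdev_ops = &ice_netdev_ops;
 *	...
 *	err = register_netdev(netdev);
 *
 * so the stack reaches ice_open(), ice_stop(), ice_start_xmit() and the other
 * callbacks through this table.
 */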