// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
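
/* Editor's note: a worked example of the wrap-around arithmetic above,
 * assuming a hypothetical ring with count = 512. If next_to_clean = 500
 * and next_to_use = 10, the ring has wrapped, so the pending count is
 * tail + count - head = 10 + 512 - 500 = 22 descriptors. Without a wrap,
 * e.g. head = 10 and tail = 500, it is simply 500 - 10 = 490.
 */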

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		if (tx_ring->desc) {
			/* If the packet counter has not changed, the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for the PF VSI; configure filters for the
 * permanent address and the broadcast address. If an error is encountered,
 * the netdevice will be unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_fltr_add_mac_list
 * to add the MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
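
/* Editor's note: an illustrative sketch (not driver code) of how this
 * callback and its unsync counterpart below are consumed. As seen later in
 * ice_vsi_sync_fltr(), they are handed to the core address-sync helpers,
 * which invoke the sync callback for each newly added address and the
 * unsync callback for each removed one:
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *
 * The callbacks only queue addresses on tmp_sync_list/tmp_unsync_list;
 * ice_vsi_sync_fltr() then pushes both lists to hardware.
 */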

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by
 * ice_fltr_remove_mac_list to delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->num_vlan > 1)
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
	else
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
	return status;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->num_vlan > 1)
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
	else
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, the VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_set_promisc(vsi, promisc_m);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_clear_promisc(vsi, promisc_m);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				ice_cfg_vlan_pruning(vsi, false);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					ice_cfg_vlan_pruning(vsi, true);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
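
/* Editor's note: a worked example of the changed_flags computation above.
 * XORing the cached flags with the current netdev flags leaves only the
 * bits that toggled; e.g. if IFF_PROMISC was just turned on while
 * IFF_ALLMULTI stayed set:
 *
 *	old flags:	IFF_UP | IFF_ALLMULTI
 *	new flags:	IFF_UP | IFF_ALLMULTI | IFF_PROMISC
 *	changed_flags:	IFF_PROMISC
 *
 * so only the promiscuous-mode branch runs on this pass.
 */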

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents that from happening and needs to be cleared beforehand.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	unsigned int i;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on the safe side, reset orig_rss_size so that the normal
	 * flow of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* other reset types do not support channel rebuild,
			 * hence reset the needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the
	 * reset-associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf, true);
	}
}
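
/* Editor's note: an illustrative sketch, assuming callers follow the
 * request-bit pattern used by ice_reset_subtask() below, of how a PF reset
 * is typically requested from elsewhere in the driver: set the request bit
 * and kick the service task, which then calls ice_do_reset():
 *
 *	set_bit(ICE_PFR_REQ, pf->state);
 *	ice_service_task_schedule(pf);
 */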

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set,
	 * prepare for the pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}
"On" : "Off"; 830 831 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || 832 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) 833 fec_req = "RS-FEC"; 834 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || 835 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) 836 fec_req = "FC-FEC/BASE-R"; 837 else 838 fec_req = "NONE"; 839 840 kfree(caps); 841 842 done: 843 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", 844 speed, fec_req, fec, an_advertised, an, fc); 845 ice_print_topo_conflict(vsi); 846 } 847 848 /** 849 * ice_vsi_link_event - update the VSI's netdev 850 * @vsi: the VSI on which the link event occurred 851 * @link_up: whether or not the VSI needs to be set up or down 852 */ 853 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) 854 { 855 if (!vsi) 856 return; 857 858 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) 859 return; 860 861 if (vsi->type == ICE_VSI_PF) { 862 if (link_up == netif_carrier_ok(vsi->netdev)) 863 return; 864 865 if (link_up) { 866 netif_carrier_on(vsi->netdev); 867 netif_tx_wake_all_queues(vsi->netdev); 868 } else { 869 netif_carrier_off(vsi->netdev); 870 netif_tx_stop_all_queues(vsi->netdev); 871 } 872 } 873 } 874 875 /** 876 * ice_set_dflt_mib - send a default config MIB to the FW 877 * @pf: private PF struct 878 * 879 * This function sends a default configuration MIB to the FW. 880 * 881 * If this function errors out at any point, the driver is still able to 882 * function. The main impact is that LFC may not operate as expected. 883 * Therefore an error state in this function should be treated with a DBG 884 * message and continue on with driver rebuild/reenable. 885 */ 886 static void ice_set_dflt_mib(struct ice_pf *pf) 887 { 888 struct device *dev = ice_pf_to_dev(pf); 889 u8 mib_type, *buf, *lldpmib = NULL; 890 u16 len, typelen, offset = 0; 891 struct ice_lldp_org_tlv *tlv; 892 struct ice_hw *hw = &pf->hw; 893 u32 ouisubtype; 894 895 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; 896 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); 897 if (!lldpmib) { 898 dev_dbg(dev, "%s Failed to allocate MIB memory\n", 899 __func__); 900 return; 901 } 902 903 /* Add ETS CFG TLV */ 904 tlv = (struct ice_lldp_org_tlv *)lldpmib; 905 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 906 ICE_IEEE_ETS_TLV_LEN); 907 tlv->typelen = htons(typelen); 908 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 909 ICE_IEEE_SUBTYPE_ETS_CFG); 910 tlv->ouisubtype = htonl(ouisubtype); 911 912 buf = tlv->tlvinfo; 913 buf[0] = 0; 914 915 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. 916 * Octets 5 - 12 are BW values, set octet 5 to 100% BW. 917 * Octets 13 - 20 are TSA values - leave as zeros 918 */ 919 buf[5] = 0x64; 920 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; 921 offset += len + 2; 922 tlv = (struct ice_lldp_org_tlv *) 923 ((char *)tlv + sizeof(tlv->typelen) + len); 924 925 /* Add ETS REC TLV */ 926 buf = tlv->tlvinfo; 927 tlv->typelen = htons(typelen); 928 929 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 930 ICE_IEEE_SUBTYPE_ETS_REC); 931 tlv->ouisubtype = htonl(ouisubtype); 932 933 /* First octet of buf is reserved 934 * Octets 1 - 4 map UP to TC - all UPs map to zero 935 * Octets 5 - 12 are BW values - set TC 0 to 100%. 

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore, an error state in this function should be treated with a DBG
 * message, and the driver should continue on with the rebuild/re-enable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
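
/* Editor's note: a worked example of the TLV header math above, assuming
 * the standard LLDP encodings (organizationally specific TLV type 127,
 * 9-bit length field). For the ETS CFG TLV the 16-bit typelen packs to
 *
 *	typelen = (127 << ICE_LLDP_TLV_TYPE_S) | ICE_IEEE_ETS_TLV_LEN
 *
 * and the 32-bit ouisubtype packs the 24-bit IEEE 802.1 OUI above the
 * 8-bit subtype in the same shift-and-OR fashion.
 */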

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail on failure, other bookkeeping is still needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (!ice_is_e810(&pf->hw))
		ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
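
/* Editor's note: an illustrative sketch (not driver code) of the calling
 * convention documented above, for a hypothetical opcode and buffer size.
 * A caller that wants the full response buffer would do roughly:
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	event.buf_len = buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return -ENOMEM;
 *	err = ice_aq_wait_for_event(pf, opcode, HZ, &event);
 *	...
 *	kfree(event.msg_buf);
 *
 * Leaving msg_buf NULL yields only the completion descriptor.
 */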

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume flows (e.g. the WoL
 * scenario).
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
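
/* Editor's note: a sketch of how this callback is presumably wired up
 * during probe (the from_timer() lookup above implies timer_setup() was
 * used on serv_tmr; the period value is an assumption for illustration):
 *
 *	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
 *	pf->serv_tmr_period = HZ;
 *	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 */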

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 i);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				mutex_lock(&pf->vf[i].cfg_lock);
				ice_reset_vf(&pf->vf[i], false);
				mutex_unlock(&pf->vf[i].cfg_lock);
			}
		}
	}

	ice_print_vfs_mdd_events(pf);
}
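
/* Editor's note: the open-coded shift-and-mask decodes in the function
 * above are equivalent to FIELD_GET() from <linux/bitfield.h>, provided
 * each _M macro is a contiguous constant mask; e.g. the PF number in the
 * GL_MDET_TX_PQM block could be written as:
 *
 *	u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
 */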
1862 */ 1863 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); 1864 if (!cfg) { 1865 retcode = -ENOMEM; 1866 goto out; 1867 } 1868 1869 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1870 if (link_up) 1871 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 1872 else 1873 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 1874 1875 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 1876 if (retcode) { 1877 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 1878 vsi->vsi_num, retcode); 1879 retcode = -EIO; 1880 } 1881 1882 kfree(cfg); 1883 out: 1884 kfree(pcaps); 1885 return retcode; 1886 } 1887 1888 /** 1889 * ice_init_nvm_phy_type - Initialize the NVM PHY type 1890 * @pi: port info structure 1891 * 1892 * Initialize nvm_phy_type_[low|high] for link lenient mode support 1893 */ 1894 static int ice_init_nvm_phy_type(struct ice_port_info *pi) 1895 { 1896 struct ice_aqc_get_phy_caps_data *pcaps; 1897 struct ice_pf *pf = pi->hw->back; 1898 int err; 1899 1900 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1901 if (!pcaps) 1902 return -ENOMEM; 1903 1904 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, 1905 pcaps, NULL); 1906 1907 if (err) { 1908 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 1909 goto out; 1910 } 1911 1912 pf->nvm_phy_type_hi = pcaps->phy_type_high; 1913 pf->nvm_phy_type_lo = pcaps->phy_type_low; 1914 1915 out: 1916 kfree(pcaps); 1917 return err; 1918 } 1919 1920 /** 1921 * ice_init_link_dflt_override - Initialize link default override 1922 * @pi: port info structure 1923 * 1924 * Initialize link default override and PHY total port shutdown during probe 1925 */ 1926 static void ice_init_link_dflt_override(struct ice_port_info *pi) 1927 { 1928 struct ice_link_default_override_tlv *ldo; 1929 struct ice_pf *pf = pi->hw->back; 1930 1931 ldo = &pf->link_dflt_override; 1932 if (ice_get_link_default_override(ldo, pi)) 1933 return; 1934 1935 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 1936 return; 1937 1938 /* Enable Total Port Shutdown (override/replace link-down-on-close 1939 * ethtool private flag) for ports with Port Disable bit set. 1940 */ 1941 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 1942 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 1943 } 1944 1945 /** 1946 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 1947 * @pi: port info structure 1948 * 1949 * If default override is enabled, initialize the user PHY cfg speed and FEC 1950 * settings using the default override mask from the NVM. 1951 * 1952 * The PHY should only be configured with the default override settings the 1953 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state 1954 * is used to indicate that the user PHY cfg default override is initialized 1955 * and the PHY has not been configured with the default override settings. The 1956 * state is set here, and cleared in ice_configure_phy the first time the PHY is 1957 * configured. 1958 * 1959 * This function should be called only if the FW doesn't support default 1960 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. 
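 *
 * A minimal caller sketch, mirroring the guard actually used in
 * ice_init_phy_user_cfg() (no new symbols assumed):
 *
 *	if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
 *	    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN))
 *		ice_init_phy_cfg_dflt_override(pi);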
1961 */ 1962 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) 1963 { 1964 struct ice_link_default_override_tlv *ldo; 1965 struct ice_aqc_set_phy_cfg_data *cfg; 1966 struct ice_phy_info *phy = &pi->phy; 1967 struct ice_pf *pf = pi->hw->back; 1968 1969 ldo = &pf->link_dflt_override; 1970 1971 /* If link default override is enabled, use it to mask NVM PHY capabilities 1972 * for speed and FEC default configuration. 1973 */ 1974 cfg = &phy->curr_user_phy_cfg; 1975 1976 if (ldo->phy_type_low || ldo->phy_type_high) { 1977 cfg->phy_type_low = pf->nvm_phy_type_lo & 1978 cpu_to_le64(ldo->phy_type_low); 1979 cfg->phy_type_high = pf->nvm_phy_type_hi & 1980 cpu_to_le64(ldo->phy_type_high); 1981 } 1982 cfg->link_fec_opt = ldo->fec_options; 1983 phy->curr_user_fec_req = ICE_FEC_AUTO; 1984 1985 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); 1986 } 1987 1988 /** 1989 * ice_init_phy_user_cfg - Initialize the PHY user configuration 1990 * @pi: port info structure 1991 * 1992 * Initialize the current user PHY configuration, speed, FEC, and FC requested 1993 * mode to default. The PHY defaults are from get PHY capabilities topology 1994 * with media, so call when media is first available. An error is returned if 1995 * called when media is not available. The PHY initialization completed state is 1996 * set here. 1997 * 1998 * These configurations are used when setting the PHY 1999 * configuration. The user PHY configuration is updated on each set PHY 2000 * configuration call. Returns 0 on success, negative on failure 2001 */ 2002 static int ice_init_phy_user_cfg(struct ice_port_info *pi) 2003 { 2004 struct ice_aqc_get_phy_caps_data *pcaps; 2005 struct ice_phy_info *phy = &pi->phy; 2006 struct ice_pf *pf = pi->hw->back; 2007 int err; 2008 2009 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 2010 return -EIO; 2011 2012 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2013 if (!pcaps) 2014 return -ENOMEM; 2015 2016 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 2017 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 2018 pcaps, NULL); 2019 else 2020 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2021 pcaps, NULL); 2022 if (err) { 2023 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 2024 goto err_out; 2025 } 2026 2027 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); 2028 2029 /* check if lenient mode is supported and enabled */ 2030 if (ice_fw_supports_link_override(pi->hw) && 2031 !(pcaps->module_compliance_enforcement & 2032 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { 2033 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); 2034 2035 /* if the FW supports default PHY configuration mode, then the driver 2036 * does not have to apply link override settings.
If not, 2037 * initialize user PHY configuration with link override values 2038 */ 2039 if (!ice_fw_supports_report_dflt_cfg(pi->hw) && 2040 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { 2041 ice_init_phy_cfg_dflt_override(pi); 2042 goto out; 2043 } 2044 } 2045 2046 /* if link default override is not enabled, set user flow control and 2047 * FEC settings based on what get_phy_caps returned 2048 */ 2049 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, 2050 pcaps->link_fec_options); 2051 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); 2052 2053 out: 2054 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; 2055 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); 2056 err_out: 2057 kfree(pcaps); 2058 return err; 2059 } 2060 2061 /** 2062 * ice_configure_phy - configure PHY 2063 * @vsi: VSI of PHY 2064 * 2065 * Set the PHY configuration. If the current PHY configuration is the same as 2066 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise, 2067 * configure the PHY based on the get PHY capabilities for topology with media. 2068 */ 2069 static int ice_configure_phy(struct ice_vsi *vsi) 2070 { 2071 struct device *dev = ice_pf_to_dev(vsi->back); 2072 struct ice_port_info *pi = vsi->port_info; 2073 struct ice_aqc_get_phy_caps_data *pcaps; 2074 struct ice_aqc_set_phy_cfg_data *cfg; 2075 struct ice_phy_info *phy = &pi->phy; 2076 struct ice_pf *pf = vsi->back; 2077 int err; 2078 2079 /* Ensure we have media as we cannot configure a medialess port */ 2080 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 2081 return -EPERM; 2082 2083 ice_print_topo_conflict(vsi); 2084 2085 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && 2086 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) 2087 return -EPERM; 2088 2089 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) 2090 return ice_force_phys_link_state(vsi, true); 2091 2092 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2093 if (!pcaps) 2094 return -ENOMEM; 2095 2096 /* Get current PHY config */ 2097 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 2098 NULL); 2099 if (err) { 2100 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", 2101 vsi->vsi_num, err); 2102 goto done; 2103 } 2104 2105 /* If PHY enable link is configured and configuration has not changed, 2106 * there's nothing to do 2107 */ 2108 if (pcaps->caps & ICE_AQC_PHY_EN_LINK && 2109 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) 2110 goto done; 2111 2112 /* Use PHY topology as baseline for configuration */ 2113 memset(pcaps, 0, sizeof(*pcaps)); 2114 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 2115 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 2116 pcaps, NULL); 2117 else 2118 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2119 pcaps, NULL); 2120 if (err) { 2121 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", 2122 vsi->vsi_num, err); 2123 goto done; 2124 } 2125 2126 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 2127 if (!cfg) { 2128 err = -ENOMEM; 2129 goto done; 2130 } 2131 2132 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); 2133 2134 /* Speed - If default override pending, use curr_user_phy_cfg set in 2135 * ice_init_phy_cfg_dflt_override().
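 * test_and_clear_bit() consumes ICE_LINK_DEFAULT_OVERRIDE_PENDING, so the
 * override speed values are applied exactly once after initialization.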
2136 */ 2137 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, 2138 vsi->back->state)) { 2139 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; 2140 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; 2141 } else { 2142 u64 phy_low = 0, phy_high = 0; 2143 2144 ice_update_phy_type(&phy_low, &phy_high, 2145 pi->phy.curr_user_speed_req); 2146 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); 2147 cfg->phy_type_high = pcaps->phy_type_high & 2148 cpu_to_le64(phy_high); 2149 } 2150 2151 /* Can't provide what was requested; use PHY capabilities */ 2152 if (!cfg->phy_type_low && !cfg->phy_type_high) { 2153 cfg->phy_type_low = pcaps->phy_type_low; 2154 cfg->phy_type_high = pcaps->phy_type_high; 2155 } 2156 2157 /* FEC */ 2158 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); 2159 2160 /* Can't provide what was requested; use PHY capabilities */ 2161 if (cfg->link_fec_opt != 2162 (cfg->link_fec_opt & pcaps->link_fec_options)) { 2163 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 2164 cfg->link_fec_opt = pcaps->link_fec_options; 2165 } 2166 2167 /* Flow Control - always supported; no need to check against 2168 * capabilities 2169 */ 2170 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); 2171 2172 /* Enable link and link update */ 2173 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 2174 2175 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); 2176 if (err) 2177 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 2178 vsi->vsi_num, err); 2179 2180 kfree(cfg); 2181 done: 2182 kfree(pcaps); 2183 return err; 2184 } 2185 2186 /** 2187 * ice_check_media_subtask - Check for media 2188 * @pf: pointer to PF struct 2189 * 2190 * If media is available, then initialize the PHY user configuration if it has 2191 * not been done yet, and configure the PHY if the interface is up. 2192 */ 2193 static void ice_check_media_subtask(struct ice_pf *pf) 2194 { 2195 struct ice_port_info *pi; 2196 struct ice_vsi *vsi; 2197 int err; 2198 2199 /* No need to check for media if it's already present */ 2200 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) 2201 return; 2202 2203 vsi = ice_get_main_vsi(pf); 2204 if (!vsi) 2205 return; 2206 2207 /* Refresh link info and check if media is present */ 2208 pi = vsi->port_info; 2209 err = ice_update_link_info(pi); 2210 if (err) 2211 return; 2212 2213 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); 2214 2215 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 2216 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) 2217 ice_init_phy_user_cfg(pi); 2218 2219 /* PHY settings are reset on media insertion, reconfigure 2220 * PHY to preserve settings.
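 * Skip the reconfiguration while the interface is administratively down
 * and the link-down-on-close flag is set; the check below enforces this.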
2221 */ 2222 if (test_bit(ICE_VSI_DOWN, vsi->state) && 2223 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 2224 return; 2225 2226 err = ice_configure_phy(vsi); 2227 if (!err) 2228 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 2229 2230 /* A Link Status Event will be generated; the event handler 2231 * will complete bringing the interface up 2232 */ 2233 } 2234 } 2235 2236 /** 2237 * ice_service_task - manage and run subtasks 2238 * @work: pointer to work_struct contained by the PF struct 2239 */ 2240 static void ice_service_task(struct work_struct *work) 2241 { 2242 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2243 unsigned long start_time = jiffies; 2244 2245 /* subtasks */ 2246 2247 /* process reset requests first */ 2248 ice_reset_subtask(pf); 2249 2250 /* bail if a reset/recovery cycle is pending or rebuild failed */ 2251 if (ice_is_reset_in_progress(pf->state) || 2252 test_bit(ICE_SUSPENDED, pf->state) || 2253 test_bit(ICE_NEEDS_RESTART, pf->state)) { 2254 ice_service_task_complete(pf); 2255 return; 2256 } 2257 2258 if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { 2259 /* Plug aux device per request */ 2260 ice_plug_aux_dev(pf); 2261 2262 /* Mark plugging as done but check whether unplug was 2263 * requested during ice_plug_aux_dev() call 2264 * (e.g. from ice_clear_rdma_cap()) and if so then 2265 * unplug the aux device. 2266 */ 2267 if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) 2268 ice_unplug_aux_dev(pf); 2269 } 2270 2271 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { 2272 struct iidc_event *event; 2273 2274 event = kzalloc(sizeof(*event), GFP_KERNEL); 2275 if (event) { 2276 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); 2277 ice_send_event_to_aux(pf, event); 2278 kfree(event); 2279 } 2280 } 2281 2282 ice_clean_adminq_subtask(pf); 2283 ice_check_media_subtask(pf); 2284 ice_check_for_hang_subtask(pf); 2285 ice_sync_fltr_subtask(pf); 2286 ice_handle_mdd_event(pf); 2287 ice_watchdog_subtask(pf); 2288 2289 if (ice_is_safe_mode(pf)) { 2290 ice_service_task_complete(pf); 2291 return; 2292 } 2293 2294 ice_process_vflr_event(pf); 2295 ice_clean_mailboxq_subtask(pf); 2296 ice_clean_sbq_subtask(pf); 2297 ice_sync_arfs_fltrs(pf); 2298 ice_flush_fdir_ctx(pf); 2299 2300 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ 2301 ice_service_task_complete(pf); 2302 2303 /* If the tasks have taken longer than one service timer period 2304 * or there is more work to be done, reset the service timer to 2305 * schedule the service task now.
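 * mod_timer() with an expiry of "jiffies" fires immediately, so the
 * service task is re-queued without waiting out a full timer period.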
2306 */ 2307 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 2308 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || 2309 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2310 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2311 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2312 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2313 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 2314 mod_timer(&pf->serv_tmr, jiffies); 2315 } 2316 2317 /** 2318 * ice_set_ctrlq_len - helper function to set controlq length 2319 * @hw: pointer to the HW instance 2320 */ 2321 static void ice_set_ctrlq_len(struct ice_hw *hw) 2322 { 2323 hw->adminq.num_rq_entries = ICE_AQ_LEN; 2324 hw->adminq.num_sq_entries = ICE_AQ_LEN; 2325 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 2326 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 2327 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; 2328 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2329 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2330 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2331 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2332 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2333 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2334 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2335 } 2336 2337 /** 2338 * ice_schedule_reset - schedule a reset 2339 * @pf: board private structure 2340 * @reset: reset being requested 2341 */ 2342 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 2343 { 2344 struct device *dev = ice_pf_to_dev(pf); 2345 2346 /* bail out if earlier reset has failed */ 2347 if (test_bit(ICE_RESET_FAILED, pf->state)) { 2348 dev_dbg(dev, "earlier reset has failed\n"); 2349 return -EIO; 2350 } 2351 /* bail if reset/recovery already in progress */ 2352 if (ice_is_reset_in_progress(pf->state)) { 2353 dev_dbg(dev, "Reset already in progress\n"); 2354 return -EBUSY; 2355 } 2356 2357 ice_unplug_aux_dev(pf); 2358 2359 switch (reset) { 2360 case ICE_RESET_PFR: 2361 set_bit(ICE_PFR_REQ, pf->state); 2362 break; 2363 case ICE_RESET_CORER: 2364 set_bit(ICE_CORER_REQ, pf->state); 2365 break; 2366 case ICE_RESET_GLOBR: 2367 set_bit(ICE_GLOBR_REQ, pf->state); 2368 break; 2369 default: 2370 return -EINVAL; 2371 } 2372 2373 ice_service_task_schedule(pf); 2374 return 0; 2375 } 2376 2377 /** 2378 * ice_irq_affinity_notify - Callback for affinity changes 2379 * @notify: context as to what irq was changed 2380 * @mask: the new affinity mask 2381 * 2382 * This is a callback function used by the irq_set_affinity_notifier function 2383 * so that we may register to receive changes to the irq affinity masks. 2384 */ 2385 static void 2386 ice_irq_affinity_notify(struct irq_affinity_notify *notify, 2387 const cpumask_t *mask) 2388 { 2389 struct ice_q_vector *q_vector = 2390 container_of(notify, struct ice_q_vector, affinity_notify); 2391 2392 cpumask_copy(&q_vector->affinity_mask, mask); 2393 } 2394 2395 /** 2396 * ice_irq_affinity_release - Callback for affinity notifier release 2397 * @ref: internal core kernel usage 2398 * 2399 * This is a callback function used by the irq_set_affinity_notifier function 2400 * to inform the current notification subscriber that they will no longer 2401 * receive notifications. 
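 *
 * The callback body below is intentionally empty: the driver keeps no
 * per-notifier state, so there is nothing to release when the kref drops.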
2402 */ 2403 static void ice_irq_affinity_release(struct kref __always_unused *ref) {} 2404 2405 /** 2406 * ice_vsi_ena_irq - Enable IRQ for the given VSI 2407 * @vsi: the VSI being configured 2408 */ 2409 static int ice_vsi_ena_irq(struct ice_vsi *vsi) 2410 { 2411 struct ice_hw *hw = &vsi->back->hw; 2412 int i; 2413 2414 ice_for_each_q_vector(vsi, i) 2415 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 2416 2417 ice_flush(hw); 2418 return 0; 2419 } 2420 2421 /** 2422 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 2423 * @vsi: the VSI being configured 2424 * @basename: name for the vector 2425 */ 2426 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 2427 { 2428 int q_vectors = vsi->num_q_vectors; 2429 struct ice_pf *pf = vsi->back; 2430 int base = vsi->base_vector; 2431 struct device *dev; 2432 int rx_int_idx = 0; 2433 int tx_int_idx = 0; 2434 int vector, err; 2435 int irq_num; 2436 2437 dev = ice_pf_to_dev(pf); 2438 for (vector = 0; vector < q_vectors; vector++) { 2439 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 2440 2441 irq_num = pf->msix_entries[base + vector].vector; 2442 2443 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { 2444 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2445 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2446 tx_int_idx++; 2447 } else if (q_vector->rx.rx_ring) { 2448 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2449 "%s-%s-%d", basename, "rx", rx_int_idx++); 2450 } else if (q_vector->tx.tx_ring) { 2451 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2452 "%s-%s-%d", basename, "tx", tx_int_idx++); 2453 } else { 2454 /* skip this unused q_vector */ 2455 continue; 2456 } 2457 if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) 2458 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2459 IRQF_SHARED, q_vector->name, 2460 q_vector); 2461 else 2462 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2463 0, q_vector->name, q_vector); 2464 if (err) { 2465 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", 2466 err); 2467 goto free_q_irqs; 2468 } 2469 2470 /* register for affinity change notifications */ 2471 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { 2472 struct irq_affinity_notify *affinity_notify; 2473 2474 affinity_notify = &q_vector->affinity_notify; 2475 affinity_notify->notify = ice_irq_affinity_notify; 2476 affinity_notify->release = ice_irq_affinity_release; 2477 irq_set_affinity_notifier(irq_num, affinity_notify); 2478 } 2479 2480 /* assign the mask for this irq */ 2481 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 2482 } 2483 2484 vsi->irqs_ready = true; 2485 return 0; 2486 2487 free_q_irqs: 2488 while (vector) { 2489 vector--; 2490 irq_num = pf->msix_entries[base + vector].vector; 2491 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 2492 irq_set_affinity_notifier(irq_num, NULL); 2493 irq_set_affinity_hint(irq_num, NULL); /* dev_id must match the q_vector passed to devm_request_irq() */ 2494 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]); 2495 } 2496 return err; 2497 } 2498 2499 /** 2500 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP 2501 * @vsi: VSI to setup Tx rings used by XDP 2502 * 2503 * Return 0 on success and negative value on error 2504 */ 2505 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) 2506 { 2507 struct device *dev = ice_pf_to_dev(vsi->back); 2508 struct ice_tx_desc *tx_desc; 2509 int i, j; 2510 2511 ice_for_each_xdp_txq(vsi, i) { 2512 u16 xdp_q_idx = vsi->alloc_txq + i; 2513 struct ice_tx_ring *xdp_ring; 2514 2515 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); 2516 2517 if
(!xdp_ring) 2518 goto free_xdp_rings; 2519 2520 xdp_ring->q_index = xdp_q_idx; 2521 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; 2522 xdp_ring->vsi = vsi; 2523 xdp_ring->netdev = NULL; 2524 xdp_ring->next_dd = ICE_TX_THRESH - 1; 2525 xdp_ring->next_rs = ICE_TX_THRESH - 1; 2526 xdp_ring->dev = dev; 2527 xdp_ring->count = vsi->num_tx_desc; 2528 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); 2529 if (ice_setup_tx_ring(xdp_ring)) 2530 goto free_xdp_rings; 2531 ice_set_ring_xdp(xdp_ring); 2532 xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring); 2533 spin_lock_init(&xdp_ring->tx_lock); 2534 for (j = 0; j < xdp_ring->count; j++) { 2535 tx_desc = ICE_TX_DESC(xdp_ring, j); 2536 tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE); 2537 } 2538 } 2539 2540 ice_for_each_rxq(vsi, i) { 2541 if (static_key_enabled(&ice_xdp_locking_key)) 2542 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; 2543 else 2544 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i]; 2545 } 2546 2547 return 0; 2548 2549 free_xdp_rings: 2550 for (; i >= 0; i--) 2551 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) 2552 ice_free_tx_ring(vsi->xdp_rings[i]); 2553 return -ENOMEM; 2554 } 2555 2556 /** 2557 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI 2558 * @vsi: VSI to set the bpf prog on 2559 * @prog: the bpf prog pointer 2560 */ 2561 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) 2562 { 2563 struct bpf_prog *old_prog; 2564 int i; 2565 2566 old_prog = xchg(&vsi->xdp_prog, prog); 2567 if (old_prog) 2568 bpf_prog_put(old_prog); 2569 2570 ice_for_each_rxq(vsi, i) 2571 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 2572 } 2573 2574 /** 2575 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2576 * @vsi: VSI to bring up Tx rings used by XDP 2577 * @prog: bpf program that will be assigned to VSI 2578 * 2579 * Return 0 on success and negative value on error 2580 */ 2581 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) 2582 { 2583 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2584 int xdp_rings_rem = vsi->num_xdp_txq; 2585 struct ice_pf *pf = vsi->back; 2586 struct ice_qs_cfg xdp_qs_cfg = { 2587 .qs_mutex = &pf->avail_q_mutex, 2588 .pf_map = pf->avail_txqs, 2589 .pf_map_size = pf->max_pf_txqs, 2590 .q_count = vsi->num_xdp_txq, 2591 .scatter_count = ICE_MAX_SCATTER_TXQS, 2592 .vsi_map = vsi->txq_map, 2593 .vsi_map_offset = vsi->alloc_txq, 2594 .mapping_mode = ICE_VSI_MAP_CONTIG 2595 }; 2596 struct device *dev; 2597 int i, v_idx; 2598 int status; 2599 2600 dev = ice_pf_to_dev(pf); 2601 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, 2602 sizeof(*vsi->xdp_rings), GFP_KERNEL); 2603 if (!vsi->xdp_rings) 2604 return -ENOMEM; 2605 2606 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; 2607 if (__ice_vsi_get_qs(&xdp_qs_cfg)) 2608 goto err_map_xdp; 2609 2610 if (static_key_enabled(&ice_xdp_locking_key)) 2611 netdev_warn(vsi->netdev, 2612 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); 2613 2614 if (ice_xdp_alloc_setup_rings(vsi)) 2615 goto clear_xdp_rings; 2616 2617 /* follow the logic from ice_vsi_map_rings_to_vectors */ 2618 ice_for_each_q_vector(vsi, v_idx) { 2619 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2620 int xdp_rings_per_v, q_id, q_base; 2621 2622 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, 2623 vsi->num_q_vectors - v_idx); 2624 q_base = vsi->num_xdp_txq - xdp_rings_rem; 2625 2626 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { 2627 struct 
ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; 2628 2629 xdp_ring->q_vector = q_vector; 2630 xdp_ring->next = q_vector->tx.tx_ring; 2631 q_vector->tx.tx_ring = xdp_ring; 2632 } 2633 xdp_rings_rem -= xdp_rings_per_v; 2634 } 2635 2636 /* omit the scheduler update if in reset path; XDP queues will be 2637 * taken into account at the end of ice_vsi_rebuild, where 2638 * ice_cfg_vsi_lan is being called 2639 */ 2640 if (ice_is_reset_in_progress(pf->state)) 2641 return 0; 2642 2643 /* tell the Tx scheduler that right now we have 2644 * additional queues 2645 */ 2646 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2647 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; 2648 2649 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2650 max_txqs); 2651 if (status) { 2652 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", 2653 status); 2654 goto clear_xdp_rings; 2655 } 2656 2657 /* assign the prog only when it's not already present on VSI; 2658 * this flow is a subject of both ethtool -L and ndo_bpf flows; 2659 * VSI rebuild that happens under ethtool -L can expose us to 2660 * the bpf_prog refcount issues as we would be swapping same 2661 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put 2662 * on it as it would be treated as an 'old_prog'; for ndo_bpf 2663 * this is not harmful as dev_xdp_install bumps the refcount 2664 * before calling the op exposed by the driver; 2665 */ 2666 if (!ice_is_xdp_ena_vsi(vsi)) 2667 ice_vsi_assign_bpf_prog(vsi, prog); 2668 2669 return 0; 2670 clear_xdp_rings: 2671 ice_for_each_xdp_txq(vsi, i) 2672 if (vsi->xdp_rings[i]) { 2673 kfree_rcu(vsi->xdp_rings[i], rcu); 2674 vsi->xdp_rings[i] = NULL; 2675 } 2676 2677 err_map_xdp: 2678 mutex_lock(&pf->avail_q_mutex); 2679 ice_for_each_xdp_txq(vsi, i) { 2680 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2681 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2682 } 2683 mutex_unlock(&pf->avail_q_mutex); 2684 2685 devm_kfree(dev, vsi->xdp_rings); 2686 return -ENOMEM; 2687 } 2688 2689 /** 2690 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings 2691 * @vsi: VSI to remove XDP rings 2692 * 2693 * Detach XDP rings from irq vectors, clean up the PF bitmap and free 2694 * resources 2695 */ 2696 int ice_destroy_xdp_rings(struct ice_vsi *vsi) 2697 { 2698 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2699 struct ice_pf *pf = vsi->back; 2700 int i, v_idx; 2701 2702 /* q_vectors are freed in reset path so there's no point in detaching 2703 * rings; in case of rebuild being triggered not from reset bits 2704 * in pf->state won't be set, so additionally check first q_vector 2705 * against NULL 2706 */ 2707 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2708 goto free_qmap; 2709 2710 ice_for_each_q_vector(vsi, v_idx) { 2711 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2712 struct ice_tx_ring *ring; 2713 2714 ice_for_each_tx_ring(ring, q_vector->tx) 2715 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2716 break; 2717 2718 /* restore the value of last node prior to XDP setup */ 2719 q_vector->tx.tx_ring = ring; 2720 } 2721 2722 free_qmap: 2723 mutex_lock(&pf->avail_q_mutex); 2724 ice_for_each_xdp_txq(vsi, i) { 2725 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2726 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2727 } 2728 mutex_unlock(&pf->avail_q_mutex); 2729 2730 ice_for_each_xdp_txq(vsi, i) 2731 if (vsi->xdp_rings[i]) { 2732 if (vsi->xdp_rings[i]->desc) 2733 ice_free_tx_ring(vsi->xdp_rings[i]); 2734 
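/* defer freeing the ring struct until after an RCU grace period so
 * readers still holding the old xdp_ring pointer never touch freed
 * memory; pairs with the WRITE_ONCE() in ice_xdp_alloc_setup_rings()
 */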
kfree_rcu(vsi->xdp_rings[i], rcu); 2735 vsi->xdp_rings[i] = NULL; 2736 } 2737 2738 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); 2739 vsi->xdp_rings = NULL; 2740 2741 if (static_key_enabled(&ice_xdp_locking_key)) 2742 static_branch_dec(&ice_xdp_locking_key); 2743 2744 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2745 return 0; 2746 2747 ice_vsi_assign_bpf_prog(vsi, NULL); 2748 2749 /* notify Tx scheduler that we destroyed XDP queues and bring 2750 * back the old number of child nodes 2751 */ 2752 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2753 max_txqs[i] = vsi->num_txq; 2754 2755 /* change number of XDP Tx queues to 0 */ 2756 vsi->num_xdp_txq = 0; 2757 2758 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2759 max_txqs); 2760 } 2761 2762 /** 2763 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI 2764 * @vsi: VSI to schedule napi on 2765 */ 2766 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) 2767 { 2768 int i; 2769 2770 ice_for_each_rxq(vsi, i) { 2771 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; 2772 2773 if (rx_ring->xsk_pool) 2774 napi_schedule(&rx_ring->q_vector->napi); 2775 } 2776 } 2777 2778 /** 2779 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have 2780 * @vsi: VSI to determine the count of XDP Tx qs 2781 * 2782 * Returns 0 if the count of available Tx queues is at least half the number 2783 * of possible CPUs, -ENOMEM otherwise 2784 */ 2785 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) 2786 { 2787 u16 avail = ice_get_avail_txq_count(vsi->back); 2788 u16 cpus = num_possible_cpus(); 2789 2790 if (avail < cpus / 2) 2791 return -ENOMEM; 2792 2793 vsi->num_xdp_txq = min_t(u16, avail, cpus); 2794 2795 if (vsi->num_xdp_txq < cpus) 2796 static_branch_inc(&ice_xdp_locking_key); 2797 2798 return 0; 2799 } 2800 2801 /** 2802 * ice_xdp_setup_prog - Add or remove XDP eBPF program 2803 * @vsi: VSI to setup XDP for 2804 * @prog: XDP program 2805 * @extack: netlink extended ack 2806 */ 2807 static int 2808 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, 2809 struct netlink_ext_ack *extack) 2810 { 2811 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; 2812 bool if_running = netif_running(vsi->netdev); 2813 int ret = 0, xdp_ring_err = 0; 2814 2815 if (frame_size > vsi->rx_buf_len) { 2816 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); 2817 return -EOPNOTSUPP; 2818 } 2819 2820 /* need to stop netdev while setting up the program for Rx rings */ 2821 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 2822 ret = ice_down(vsi); 2823 if (ret) { 2824 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); 2825 return ret; 2826 } 2827 } 2828 2829 if (!ice_is_xdp_ena_vsi(vsi) && prog) { 2830 xdp_ring_err = ice_vsi_determine_xdp_res(vsi); 2831 if (xdp_ring_err) { 2832 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); 2833 } else { 2834 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); 2835 if (xdp_ring_err) 2836 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); 2837 } 2838 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { 2839 xdp_ring_err = ice_destroy_xdp_rings(vsi); 2840 if (xdp_ring_err) 2841 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); 2842 } else { 2843 /* safe to call even when prog == vsi->xdp_prog as 2844 * dev_xdp_install in net/core/dev.c incremented prog's 2845 * refcount so corresponding bpf_prog_put won't cause 2846 * underflow 2847 */ 2848 ice_vsi_assign_bpf_prog(vsi, prog); 2849 } 2850 2851 if (if_running) 2852 ret =
ice_up(vsi); 2853 2854 if (!ret && prog) 2855 ice_vsi_rx_napi_schedule(vsi); 2856 2857 return (ret || xdp_ring_err) ? -ENOMEM : 0; 2858 } 2859 2860 /** 2861 * ice_xdp_safe_mode - XDP handler for safe mode 2862 * @dev: netdevice 2863 * @xdp: XDP command 2864 */ 2865 static int ice_xdp_safe_mode(struct net_device __always_unused *dev, 2866 struct netdev_bpf *xdp) 2867 { 2868 NL_SET_ERR_MSG_MOD(xdp->extack, 2869 "Please provide working DDP firmware package in order to use XDP\n" 2870 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); 2871 return -EOPNOTSUPP; 2872 } 2873 2874 /** 2875 * ice_xdp - implements XDP handler 2876 * @dev: netdevice 2877 * @xdp: XDP command 2878 */ 2879 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2880 { 2881 struct ice_netdev_priv *np = netdev_priv(dev); 2882 struct ice_vsi *vsi = np->vsi; 2883 2884 if (vsi->type != ICE_VSI_PF) { 2885 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); 2886 return -EINVAL; 2887 } 2888 2889 switch (xdp->command) { 2890 case XDP_SETUP_PROG: 2891 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); 2892 case XDP_SETUP_XSK_POOL: 2893 return ice_xsk_pool_setup(vsi, xdp->xsk.pool, 2894 xdp->xsk.queue_id); 2895 default: 2896 return -EINVAL; 2897 } 2898 } 2899 2900 /** 2901 * ice_ena_misc_vector - enable the non-queue interrupts 2902 * @pf: board private structure 2903 */ 2904 static void ice_ena_misc_vector(struct ice_pf *pf) 2905 { 2906 struct ice_hw *hw = &pf->hw; 2907 u32 val; 2908 2909 /* Disable anti-spoof detection interrupt to prevent spurious event 2910 * interrupts during a function reset. Anti-spoof functionality is 2911 * still supported. 2912 */ 2913 val = rd32(hw, GL_MDCK_TX_TDPU); 2914 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; 2915 wr32(hw, GL_MDCK_TX_TDPU, val); 2916 2917 /* clear things first */ 2918 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ 2919 rd32(hw, PFINT_OICR); /* read to clear */ 2920 2921 val = (PFINT_OICR_ECC_ERR_M | 2922 PFINT_OICR_MAL_DETECT_M | 2923 PFINT_OICR_GRST_M | 2924 PFINT_OICR_PCI_EXCEPTION_M | 2925 PFINT_OICR_VFLR_M | 2926 PFINT_OICR_HMC_ERR_M | 2927 PFINT_OICR_PE_PUSH_M | 2928 PFINT_OICR_PE_CRITERR_M); 2929 2930 wr32(hw, PFINT_OICR_ENA, val); 2931 2932 /* SW_ITR_IDX = 0, but don't change INTENA */ 2933 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), 2934 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); 2935 } 2936 2937 /** 2938 * ice_misc_intr - misc interrupt handler 2939 * @irq: interrupt number 2940 * @data: pointer to the PF structure (the IRQ dev_id) 2941 */ 2942 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) 2943 { 2944 struct ice_pf *pf = (struct ice_pf *)data; 2945 struct ice_hw *hw = &pf->hw; 2946 irqreturn_t ret = IRQ_NONE; 2947 struct device *dev; 2948 u32 oicr, ena_mask; 2949 2950 dev = ice_pf_to_dev(pf); 2951 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 2952 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); 2953 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 2954 2955 oicr = rd32(hw, PFINT_OICR); 2956 ena_mask = rd32(hw, PFINT_OICR_ENA); 2957 2958 if (oicr & PFINT_OICR_SWINT_M) { 2959 ena_mask &= ~PFINT_OICR_SWINT_M; 2960 pf->sw_int_count++; 2961 } 2962 2963 if (oicr & PFINT_OICR_MAL_DETECT_M) { 2964 ena_mask &= ~PFINT_OICR_MAL_DETECT_M; 2965 set_bit(ICE_MDD_EVENT_PENDING, pf->state); 2966 } 2967 if (oicr & PFINT_OICR_VFLR_M) { 2968 /* disable any further VFLR event notifications */ 2969 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 2970 u32 reg = rd32(hw, PFINT_OICR_ENA); 2971 2972 reg &=
~PFINT_OICR_VFLR_M; 2973 wr32(hw, PFINT_OICR_ENA, reg); 2974 } else { 2975 ena_mask &= ~PFINT_OICR_VFLR_M; 2976 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); 2977 } 2978 } 2979 2980 if (oicr & PFINT_OICR_GRST_M) { 2981 u32 reset; 2982 2983 /* we have a reset warning */ 2984 ena_mask &= ~PFINT_OICR_GRST_M; 2985 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> 2986 GLGEN_RSTAT_RESET_TYPE_S; 2987 2988 if (reset == ICE_RESET_CORER) 2989 pf->corer_count++; 2990 else if (reset == ICE_RESET_GLOBR) 2991 pf->globr_count++; 2992 else if (reset == ICE_RESET_EMPR) 2993 pf->empr_count++; 2994 else 2995 dev_dbg(dev, "Invalid reset type %d\n", reset); 2996 2997 /* If a reset cycle isn't already in progress, we set a bit in 2998 * pf->state so that the service task can start a reset/rebuild. 2999 */ 3000 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { 3001 if (reset == ICE_RESET_CORER) 3002 set_bit(ICE_CORER_RECV, pf->state); 3003 else if (reset == ICE_RESET_GLOBR) 3004 set_bit(ICE_GLOBR_RECV, pf->state); 3005 else 3006 set_bit(ICE_EMPR_RECV, pf->state); 3007 3008 /* There are a couple of different bits at play here. 3009 * hw->reset_ongoing indicates whether the hardware is 3010 * in reset. This is set to true when a reset interrupt 3011 * is received and set back to false after the driver 3012 * has determined that the hardware is out of reset. 3013 * 3014 * ICE_RESET_OICR_RECV in pf->state indicates 3015 * that a post reset rebuild is required before the 3016 * driver is operational again. This is set above. 3017 * 3018 * As this is the start of the reset/rebuild cycle, set 3019 * both to indicate that. 3020 */ 3021 hw->reset_ongoing = true; 3022 } 3023 } 3024 3025 if (oicr & PFINT_OICR_TSYN_TX_M) { 3026 ena_mask &= ~PFINT_OICR_TSYN_TX_M; 3027 ice_ptp_process_ts(pf); 3028 } 3029 3030 if (oicr & PFINT_OICR_TSYN_EVNT_M) { 3031 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3032 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); 3033 3034 /* Save EVENTs from GLTSYN register */ 3035 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | 3036 GLTSYN_STAT_EVENT1_M | 3037 GLTSYN_STAT_EVENT2_M); 3038 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; 3039 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); 3040 } 3041 3042 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) 3043 if (oicr & ICE_AUX_CRIT_ERR) { 3044 struct iidc_event *event; 3045 3046 ena_mask &= ~ICE_AUX_CRIT_ERR; 3047 event = kzalloc(sizeof(*event), GFP_ATOMIC); 3048 if (event) { 3049 set_bit(IIDC_EVENT_CRIT_ERR, event->type); 3050 /* report the entire OICR value to AUX driver */ 3051 event->reg = oicr; 3052 ice_send_event_to_aux(pf, event); 3053 kfree(event); 3054 } 3055 } 3056 3057 /* Report any remaining unexpected interrupts */ 3058 oicr &= ena_mask; 3059 if (oicr) { 3060 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); 3061 /* If a critical error is pending there is no choice but to 3062 * reset the device.
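 * A PFR is requested here rather than performed directly because
 * the rebuild must run from the service task, not from IRQ context.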
3063 */ 3064 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | 3065 PFINT_OICR_ECC_ERR_M)) { 3066 set_bit(ICE_PFR_REQ, pf->state); 3067 ice_service_task_schedule(pf); 3068 } 3069 } 3070 ret = IRQ_HANDLED; 3071 3072 ice_service_task_schedule(pf); 3073 ice_irq_dynamic_ena(hw, NULL, NULL); 3074 3075 return ret; 3076 } 3077 3078 /** 3079 * ice_dis_ctrlq_interrupts - disable control queue interrupts 3080 * @hw: pointer to HW structure 3081 */ 3082 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 3083 { 3084 /* disable Admin queue Interrupt causes */ 3085 wr32(hw, PFINT_FW_CTL, 3086 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 3087 3088 /* disable Mailbox queue Interrupt causes */ 3089 wr32(hw, PFINT_MBX_CTL, 3090 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 3091 3092 wr32(hw, PFINT_SB_CTL, 3093 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); 3094 3095 /* disable Control queue Interrupt causes */ 3096 wr32(hw, PFINT_OICR_CTL, 3097 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 3098 3099 ice_flush(hw); 3100 } 3101 3102 /** 3103 * ice_free_irq_msix_misc - Unroll misc vector setup 3104 * @pf: board private structure 3105 */ 3106 static void ice_free_irq_msix_misc(struct ice_pf *pf) 3107 { 3108 struct ice_hw *hw = &pf->hw; 3109 3110 ice_dis_ctrlq_interrupts(hw); 3111 3112 /* disable OICR interrupt */ 3113 wr32(hw, PFINT_OICR_ENA, 0); 3114 ice_flush(hw); 3115 3116 if (pf->msix_entries) { 3117 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); 3118 devm_free_irq(ice_pf_to_dev(pf), 3119 pf->msix_entries[pf->oicr_idx].vector, pf); 3120 } 3121 3122 pf->num_avail_sw_msix += 1; 3123 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); 3124 } 3125 3126 /** 3127 * ice_ena_ctrlq_interrupts - enable control queue interrupts 3128 * @hw: pointer to HW structure 3129 * @reg_idx: HW vector index to associate the control queue interrupts with 3130 */ 3131 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) 3132 { 3133 u32 val; 3134 3135 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 3136 PFINT_OICR_CTL_CAUSE_ENA_M); 3137 wr32(hw, PFINT_OICR_CTL, val); 3138 3139 /* enable Admin queue Interrupt causes */ 3140 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 3141 PFINT_FW_CTL_CAUSE_ENA_M); 3142 wr32(hw, PFINT_FW_CTL, val); 3143 3144 /* enable Mailbox queue Interrupt causes */ 3145 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 3146 PFINT_MBX_CTL_CAUSE_ENA_M); 3147 wr32(hw, PFINT_MBX_CTL, val); 3148 3149 /* This enables Sideband queue Interrupt causes */ 3150 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 3151 PFINT_SB_CTL_CAUSE_ENA_M); 3152 wr32(hw, PFINT_SB_CTL, val); 3153 3154 ice_flush(hw); 3155 } 3156 3157 /** 3158 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 3159 * @pf: board private structure 3160 * 3161 * This sets up the handler for MSIX 0, which is used to manage the 3162 * non-queue interrupts, e.g. AdminQ and errors. This is not used 3163 * when in MSI or Legacy interrupt mode. 3164 */ 3165 static int ice_req_irq_msix_misc(struct ice_pf *pf) 3166 { 3167 struct device *dev = ice_pf_to_dev(pf); 3168 struct ice_hw *hw = &pf->hw; 3169 int oicr_idx, err = 0; 3170 3171 if (!pf->int_name[0]) 3172 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 3173 dev_driver_string(dev), dev_name(dev)); 3174 3175 /* Do not request IRQ but do enable OICR interrupt since settings are 3176 * lost during reset. Note that this function is called only during 3177 * rebuild path and not while reset is in progress. 
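 * The MSI-X vector itself stays requested across the reset; only the
 * interrupt cause enable registers below need to be reprogrammed.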
3178 */ 3179 if (ice_is_reset_in_progress(pf->state)) 3180 goto skip_req_irq; 3181 3182 /* reserve one vector in irq_tracker for misc interrupts */ 3183 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 3184 if (oicr_idx < 0) 3185 return oicr_idx; 3186 3187 pf->num_avail_sw_msix -= 1; 3188 pf->oicr_idx = (u16)oicr_idx; 3189 3190 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, 3191 ice_misc_intr, 0, pf->int_name, pf); 3192 if (err) { 3193 dev_err(dev, "devm_request_irq for %s failed: %d\n", 3194 pf->int_name, err); 3195 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 3196 pf->num_avail_sw_msix += 1; 3197 return err; 3198 } 3199 3200 skip_req_irq: 3201 ice_ena_misc_vector(pf); 3202 3203 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); 3204 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), 3205 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 3206 3207 ice_flush(hw); 3208 ice_irq_dynamic_ena(hw, NULL, NULL); 3209 3210 return 0; 3211 } 3212 3213 /** 3214 * ice_napi_add - register NAPI handler for the VSI 3215 * @vsi: VSI for which NAPI handler is to be registered 3216 * 3217 * This function is only called in the driver's load path. Registering the NAPI 3218 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 3219 * reset/rebuild, etc.) 3220 */ 3221 static void ice_napi_add(struct ice_vsi *vsi) 3222 { 3223 int v_idx; 3224 3225 if (!vsi->netdev) 3226 return; 3227 3228 ice_for_each_q_vector(vsi, v_idx) 3229 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, 3230 ice_napi_poll, NAPI_POLL_WEIGHT); 3231 } 3232 3233 /** 3234 * ice_set_ops - set netdev and ethtool ops for the given netdev 3235 * @netdev: netdev instance 3236 */ 3237 static void ice_set_ops(struct net_device *netdev) 3238 { 3239 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3240 3241 if (ice_is_safe_mode(pf)) { 3242 netdev->netdev_ops = &ice_netdev_safe_mode_ops; 3243 ice_set_ethtool_safe_mode_ops(netdev); 3244 return; 3245 } 3246 3247 netdev->netdev_ops = &ice_netdev_ops; 3248 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; 3249 ice_set_ethtool_ops(netdev); 3250 } 3251 3252 /** 3253 * ice_set_netdev_features - set features for the given netdev 3254 * @netdev: netdev instance 3255 */ 3256 static void ice_set_netdev_features(struct net_device *netdev) 3257 { 3258 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3259 netdev_features_t csumo_features; 3260 netdev_features_t vlano_features; 3261 netdev_features_t dflt_features; 3262 netdev_features_t tso_features; 3263 3264 if (ice_is_safe_mode(pf)) { 3265 /* safe mode */ 3266 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 3267 netdev->hw_features = netdev->features; 3268 return; 3269 } 3270 3271 dflt_features = NETIF_F_SG | 3272 NETIF_F_HIGHDMA | 3273 NETIF_F_NTUPLE | 3274 NETIF_F_RXHASH; 3275 3276 csumo_features = NETIF_F_RXCSUM | 3277 NETIF_F_IP_CSUM | 3278 NETIF_F_SCTP_CRC | 3279 NETIF_F_IPV6_CSUM; 3280 3281 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 3282 NETIF_F_HW_VLAN_CTAG_TX | 3283 NETIF_F_HW_VLAN_CTAG_RX; 3284 3285 tso_features = NETIF_F_TSO | 3286 NETIF_F_TSO_ECN | 3287 NETIF_F_TSO6 | 3288 NETIF_F_GSO_GRE | 3289 NETIF_F_GSO_UDP_TUNNEL | 3290 NETIF_F_GSO_GRE_CSUM | 3291 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3292 NETIF_F_GSO_PARTIAL | 3293 NETIF_F_GSO_IPXIP4 | 3294 NETIF_F_GSO_IPXIP6 | 3295 NETIF_F_GSO_UDP_L4; 3296 3297 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | 3298 NETIF_F_GSO_GRE_CSUM; 3299 /* set features that user can change */ 3300 netdev->hw_features = dflt_features | csumo_features |
3301 vlano_features | tso_features; 3302 3303 /* add support for HW_CSUM on packets with MPLS header */ 3304 netdev->mpls_features = NETIF_F_HW_CSUM; 3305 3306 /* enable features */ 3307 netdev->features |= netdev->hw_features; 3308 3309 netdev->hw_features |= NETIF_F_HW_TC; 3310 3311 /* encap and VLAN devices inherit default, csumo and tso features */ 3312 netdev->hw_enc_features |= dflt_features | csumo_features | 3313 tso_features; 3314 netdev->vlan_features |= dflt_features | csumo_features | 3315 tso_features; 3316 } 3317 3318 /** 3319 * ice_cfg_netdev - Allocate, configure and register a netdev 3320 * @vsi: the VSI associated with the new netdev 3321 * 3322 * Returns 0 on success, negative value on failure 3323 */ 3324 static int ice_cfg_netdev(struct ice_vsi *vsi) 3325 { 3326 struct ice_netdev_priv *np; 3327 struct net_device *netdev; 3328 u8 mac_addr[ETH_ALEN]; 3329 3330 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 3331 vsi->alloc_rxq); 3332 if (!netdev) 3333 return -ENOMEM; 3334 3335 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3336 vsi->netdev = netdev; 3337 np = netdev_priv(netdev); 3338 np->vsi = vsi; 3339 3340 ice_set_netdev_features(netdev); 3341 3342 ice_set_ops(netdev); 3343 3344 if (vsi->type == ICE_VSI_PF) { 3345 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); 3346 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 3347 eth_hw_addr_set(netdev, mac_addr); 3348 ether_addr_copy(netdev->perm_addr, mac_addr); 3349 } 3350 3351 netdev->priv_flags |= IFF_UNICAST_FLT; 3352 3353 /* Setup netdev TC information */ 3354 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 3355 3356 /* setup watchdog timeout value to be 5 seconds */ 3357 netdev->watchdog_timeo = 5 * HZ; 3358 3359 netdev->min_mtu = ETH_MIN_MTU; 3360 netdev->max_mtu = ICE_MAX_MTU; 3361 3362 return 0; 3363 } 3364 3365 /** 3366 * ice_fill_rss_lut - Fill the RSS lookup table with default values 3367 * @lut: Lookup table 3368 * @rss_table_size: Lookup table size 3369 * @rss_size: Range of queue number for hashing 3370 */ 3371 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 3372 { 3373 u16 i; 3374 3375 for (i = 0; i < rss_table_size; i++) 3376 lut[i] = i % rss_size; 3377 } 3378 3379 /** 3380 * ice_pf_vsi_setup - Set up a PF VSI 3381 * @pf: board private structure 3382 * @pi: pointer to the port_info instance 3383 * 3384 * Returns pointer to the successfully allocated VSI software struct 3385 * on success, otherwise returns NULL on failure. 3386 */ 3387 static struct ice_vsi * 3388 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3389 { 3390 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL); 3391 } 3392 3393 static struct ice_vsi * 3394 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 3395 struct ice_channel *ch) 3396 { 3397 return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch); 3398 } 3399 3400 /** 3401 * ice_ctrl_vsi_setup - Set up a control VSI 3402 * @pf: board private structure 3403 * @pi: pointer to the port_info instance 3404 * 3405 * Returns pointer to the successfully allocated VSI software struct 3406 * on success, otherwise returns NULL on failure.
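 *
 * Like ice_pf_vsi_setup(), ice_chnl_vsi_setup() and ice_lb_vsi_setup(),
 * this is a thin wrapper around ice_vsi_setup() that differs only in the
 * VSI type requested.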
3407 */ 3408 static struct ice_vsi * 3409 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3410 { 3411 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL); 3412 } 3413 3414 /** 3415 * ice_lb_vsi_setup - Set up a loopback VSI 3416 * @pf: board private structure 3417 * @pi: pointer to the port_info instance 3418 * 3419 * Returns pointer to the successfully allocated VSI software struct 3420 * on success, otherwise returns NULL on failure. 3421 */ 3422 struct ice_vsi * 3423 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3424 { 3425 return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL); 3426 } 3427 3428 /** 3429 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 3430 * @netdev: network interface to be adjusted 3431 * @proto: unused protocol 3432 * @vid: VLAN ID to be added 3433 * 3434 * net_device_ops implementation for adding VLAN IDs 3435 */ 3436 static int 3437 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, 3438 u16 vid) 3439 { 3440 struct ice_netdev_priv *np = netdev_priv(netdev); 3441 struct ice_vsi *vsi = np->vsi; 3442 int ret; 3443 3444 /* VLAN 0 is added by default during load/reset */ 3445 if (!vid) 3446 return 0; 3447 3448 /* Enable VLAN pruning when a VLAN other than 0 is added */ 3449 if (!ice_vsi_is_vlan_pruning_ena(vsi)) { 3450 ret = ice_cfg_vlan_pruning(vsi, true); 3451 if (ret) 3452 return ret; 3453 } 3454 3455 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged 3456 * packets aren't pruned by the device's internal switch on Rx 3457 */ 3458 ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); 3459 if (!ret) 3460 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 3461 3462 return ret; 3463 } 3464 3465 /** 3466 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload 3467 * @netdev: network interface to be adjusted 3468 * @proto: unused protocol 3469 * @vid: VLAN ID to be removed 3470 * 3471 * net_device_ops implementation for removing VLAN IDs 3472 */ 3473 static int 3474 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, 3475 u16 vid) 3476 { 3477 struct ice_netdev_priv *np = netdev_priv(netdev); 3478 struct ice_vsi *vsi = np->vsi; 3479 int ret; 3480 3481 /* don't allow removal of VLAN 0 */ 3482 if (!vid) 3483 return 0; 3484 3485 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN 3486 * information 3487 */ 3488 ret = ice_vsi_kill_vlan(vsi, vid); 3489 if (ret) 3490 return ret; 3491 3492 /* Disable pruning when VLAN 0 is the only VLAN rule */ 3493 if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) 3494 ret = ice_cfg_vlan_pruning(vsi, false); 3495 3496 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 3497 return ret; 3498 } 3499 3500 /** 3501 * ice_rep_indr_tc_block_unbind 3502 * @cb_priv: indirection block private data 3503 */ 3504 static void ice_rep_indr_tc_block_unbind(void *cb_priv) 3505 { 3506 struct ice_indr_block_priv *indr_priv = cb_priv; 3507 3508 list_del(&indr_priv->list); 3509 kfree(indr_priv); 3510 } 3511 3512 /** 3513 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications 3514 * @vsi: VSI struct which has the netdev 3515 */ 3516 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) 3517 { 3518 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); 3519 3520 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, 3521 ice_rep_indr_tc_block_unbind); 3522 } 3523 3524 /** 3525 * ice_tc_indir_block_remove - clean indirect TC block notifications 3526 * @pf: PF structure 3527 */ 3528 static 
void ice_tc_indir_block_remove(struct ice_pf *pf) 3529 { 3530 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); 3531 3532 if (!pf_vsi) 3533 return; 3534 3535 ice_tc_indir_block_unregister(pf_vsi); 3536 } 3537 3538 /** 3539 * ice_tc_indir_block_register - Register TC indirect block notifications 3540 * @vsi: VSI struct which has the netdev 3541 * 3542 * Returns 0 on success, negative value on failure 3543 */ 3544 static int ice_tc_indir_block_register(struct ice_vsi *vsi) 3545 { 3546 struct ice_netdev_priv *np; 3547 3548 if (!vsi || !vsi->netdev) 3549 return -EINVAL; 3550 3551 np = netdev_priv(vsi->netdev); 3552 3553 INIT_LIST_HEAD(&np->tc_indr_block_priv_list); 3554 return flow_indr_dev_register(ice_indr_setup_tc_cb, np); 3555 } 3556 3557 /** 3558 * ice_setup_pf_sw - Setup the HW switch on startup or after reset 3559 * @pf: board private structure 3560 * 3561 * Returns 0 on success, negative value on failure 3562 */ 3563 static int ice_setup_pf_sw(struct ice_pf *pf) 3564 { 3565 struct device *dev = ice_pf_to_dev(pf); 3566 struct ice_vsi *vsi; 3567 int status; 3568 3569 if (ice_is_reset_in_progress(pf->state)) 3570 return -EBUSY; 3571 3572 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 3573 if (!vsi) 3574 return -ENOMEM; 3575 3576 /* init channel list */ 3577 INIT_LIST_HEAD(&vsi->ch_list); 3578 3579 status = ice_cfg_netdev(vsi); 3580 if (status) 3581 goto unroll_vsi_setup; 3582 /* netdev has to be configured before setting frame size */ 3583 ice_vsi_cfg_frame_size(vsi); 3584 3585 /* init indirect block notifications */ 3586 status = ice_tc_indir_block_register(vsi); 3587 if (status) { 3588 dev_err(dev, "Failed to register netdev notifier\n"); 3589 goto unroll_cfg_netdev; 3590 } 3591 3592 /* Setup DCB netlink interface */ 3593 ice_dcbnl_setup(vsi); 3594 3595 /* registering the NAPI handler requires both the queues and 3596 * netdev to be created, which are done in ice_pf_vsi_setup() 3597 * and ice_cfg_netdev() respectively 3598 */ 3599 ice_napi_add(vsi); 3600 3601 status = ice_set_cpu_rx_rmap(vsi); 3602 if (status) { 3603 dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", 3604 vsi->vsi_num, status); 3605 goto unroll_napi_add; 3606 } 3607 status = ice_init_mac_fltr(pf); 3608 if (status) 3609 goto free_cpu_rx_map; 3610 3611 return 0; 3612 3613 free_cpu_rx_map: 3614 ice_free_cpu_rx_rmap(vsi); 3615 unroll_napi_add: 3616 ice_tc_indir_block_unregister(vsi); 3617 unroll_cfg_netdev: 3618 if (vsi) { 3619 ice_napi_del(vsi); 3620 if (vsi->netdev) { 3621 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3622 free_netdev(vsi->netdev); 3623 vsi->netdev = NULL; 3624 } 3625 } 3626 3627 unroll_vsi_setup: 3628 ice_vsi_release(vsi); 3629 return status; 3630 } 3631 3632 /** 3633 * ice_get_avail_q_count - Get count of queues in use 3634 * @pf_qmap: bitmap to get queue use count from 3635 * @lock: pointer to a mutex that protects access to pf_qmap 3636 * @size: size of the bitmap 3637 */ 3638 static u16 3639 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 3640 { 3641 unsigned long bit; 3642 u16 count = 0; 3643 3644 mutex_lock(lock); 3645 for_each_clear_bit(bit, pf_qmap, size) 3646 count++; 3647 mutex_unlock(lock); 3648 3649 return count; 3650 } 3651 3652 /** 3653 * ice_get_avail_txq_count - Get count of Tx queues in use 3654 * @pf: pointer to an ice_pf instance 3655 */ 3656 u16 ice_get_avail_txq_count(struct ice_pf *pf) 3657 { 3658 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 3659 pf->max_pf_txqs); 3660 } 3661 3662 /** 3663 * ice_get_avail_rxq_count - Get count of 
Rx queues in use 3664 * @pf: pointer to an ice_pf instance 3665 */ 3666 u16 ice_get_avail_rxq_count(struct ice_pf *pf) 3667 { 3668 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 3669 pf->max_pf_rxqs); 3670 } 3671 3672 /** 3673 * ice_deinit_pf - Unrolls initializations done by ice_init_pf 3674 * @pf: board private structure to deinitialize 3675 */ 3676 static void ice_deinit_pf(struct ice_pf *pf) 3677 { 3678 ice_service_task_stop(pf); 3679 mutex_destroy(&pf->sw_mutex); 3680 mutex_destroy(&pf->tc_mutex); 3681 mutex_destroy(&pf->avail_q_mutex); 3682 3683 if (pf->avail_txqs) { 3684 bitmap_free(pf->avail_txqs); 3685 pf->avail_txqs = NULL; 3686 } 3687 3688 if (pf->avail_rxqs) { 3689 bitmap_free(pf->avail_rxqs); 3690 pf->avail_rxqs = NULL; 3691 } 3692 3693 if (pf->ptp.clock) 3694 ptp_clock_unregister(pf->ptp.clock); 3695 } 3696 3697 /** 3698 * ice_set_pf_caps - set PF's capability flags 3699 * @pf: pointer to the PF instance 3700 */ 3701 static void ice_set_pf_caps(struct ice_pf *pf) 3702 { 3703 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 3704 3705 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3706 clear_bit(ICE_FLAG_AUX_ENA, pf->flags); 3707 if (func_caps->common_cap.rdma) { 3708 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3709 set_bit(ICE_FLAG_AUX_ENA, pf->flags); 3710 } 3711 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3712 if (func_caps->common_cap.dcb) 3713 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3714 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3715 if (func_caps->common_cap.sr_iov_1_1) { 3716 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3717 pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, 3718 ICE_MAX_VF_COUNT); 3719 } 3720 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 3721 if (func_caps->common_cap.rss_table_size) 3722 set_bit(ICE_FLAG_RSS_ENA, pf->flags); 3723 3724 clear_bit(ICE_FLAG_FD_ENA, pf->flags); 3725 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 3726 u16 unused; 3727 3728 /* ctrl_vsi_idx will be set to a valid value when flow director 3729 * is setup by ice_init_fdir 3730 */ 3731 pf->ctrl_vsi_idx = ICE_NO_VSI; 3732 set_bit(ICE_FLAG_FD_ENA, pf->flags); 3733 /* force guaranteed filter pool for PF */ 3734 ice_alloc_fd_guar_item(&pf->hw, &unused, 3735 func_caps->fd_fltr_guar); 3736 /* force shared filter pool for PF */ 3737 ice_alloc_fd_shrd_item(&pf->hw, &unused, 3738 func_caps->fd_fltr_best_effort); 3739 } 3740 3741 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3742 if (func_caps->common_cap.ieee_1588) 3743 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3744 3745 pf->max_pf_txqs = func_caps->common_cap.num_txq; 3746 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 3747 } 3748 3749 /** 3750 * ice_init_pf - Initialize general software structures (struct ice_pf) 3751 * @pf: board private structure to initialize 3752 */ 3753 static int ice_init_pf(struct ice_pf *pf) 3754 { 3755 ice_set_pf_caps(pf); 3756 3757 mutex_init(&pf->sw_mutex); 3758 mutex_init(&pf->tc_mutex); 3759 3760 INIT_HLIST_HEAD(&pf->aq_wait_list); 3761 spin_lock_init(&pf->aq_wait_lock); 3762 init_waitqueue_head(&pf->aq_wait_queue); 3763 3764 init_waitqueue_head(&pf->reset_wait_queue); 3765 3766 /* setup service timer and periodic service task */ 3767 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 3768 pf->serv_tmr_period = HZ; 3769 INIT_WORK(&pf->serv_task, ice_service_task); 3770 clear_bit(ICE_SERVICE_SCHED, pf->state); 3771 3772 mutex_init(&pf->avail_q_mutex); 3773 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 3774 if (!pf->avail_txqs) 3775 return
3776
3777 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3778 if (!pf->avail_rxqs) {
3779 bitmap_free(pf->avail_txqs);
3780 pf->avail_txqs = NULL;
3781 return -ENOMEM;
3782 }
3783
3784 return 0;
3785 }
3786
3787 /**
3788 * ice_ena_msix_range - Request a range of MSI-X vectors from the OS
3789 * @pf: board private structure
3790 *
3791 * Compute the number of MSI-X vectors required (v_budget) and request that
3792 * many from the OS. Return the number of vectors reserved, or a negative
3793 * value on failure.
3794 */
3794 static int ice_ena_msix_range(struct ice_pf *pf)
3795 {
3796 int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3797 struct device *dev = ice_pf_to_dev(pf);
3798 int needed, err, i;
3799
3800 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3801 num_cpus = num_online_cpus();
3802
3803 /* reserve for LAN miscellaneous handler */
3804 needed = ICE_MIN_LAN_OICR_MSIX;
3805 if (v_left < needed)
3806 goto no_hw_vecs_left_err;
3807 v_budget += needed;
3808 v_left -= needed;
3809
3810 /* reserve for flow director */
3811 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3812 needed = ICE_FDIR_MSIX;
3813 if (v_left < needed)
3814 goto no_hw_vecs_left_err;
3815 v_budget += needed;
3816 v_left -= needed;
3817 }
3818
3819 /* reserve for switchdev */
3820 needed = ICE_ESWITCH_MSIX;
3821 if (v_left < needed)
3822 goto no_hw_vecs_left_err;
3823 v_budget += needed;
3824 v_left -= needed;
3825
3826 /* total used for non-traffic vectors */
3827 v_other = v_budget;
3828
3829 /* reserve vectors for LAN traffic */
3830 needed = num_cpus;
3831 if (v_left < needed)
3832 goto no_hw_vecs_left_err;
3833 pf->num_lan_msix = needed;
3834 v_budget += needed;
3835 v_left -= needed;
3836
3837 /* reserve vectors for RDMA auxiliary driver */
3838 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3839 needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3840 if (v_left < needed)
3841 goto no_hw_vecs_left_err;
3842 pf->num_rdma_msix = needed;
3843 v_budget += needed;
3844 v_left -= needed;
3845 }
3846
3847 pf->msix_entries = devm_kcalloc(dev, v_budget,
3848 sizeof(*pf->msix_entries), GFP_KERNEL);
3849 if (!pf->msix_entries) {
3850 err = -ENOMEM;
3851 goto exit_err;
3852 }
3853
3854 for (i = 0; i < v_budget; i++)
3855 pf->msix_entries[i].entry = i;
3856
3857 /* actually reserve the vectors */
3858 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3859 ICE_MIN_MSIX, v_budget);
3860 if (v_actual < 0) {
3861 dev_err(dev, "unable to reserve MSI-X vectors\n");
3862 err = v_actual;
3863 goto msix_err;
3864 }
3865
3866 if (v_actual < v_budget) {
3867 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3868 v_budget, v_actual);
3869
3870 if (v_actual < ICE_MIN_MSIX) {
3871 /* error if we can't get minimum vectors */
3872 pci_disable_msix(pf->pdev);
3873 err = -ERANGE;
3874 goto msix_err;
3875 } else {
3876 int v_remain = v_actual - v_other;
3877 int v_rdma = 0, v_min_rdma = 0;
3878
3879 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3880 /* Need at least 1 interrupt in addition to
3881 * AEQ MSIX
3882 */
3883 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3884 v_min_rdma = ICE_MIN_RDMA_MSIX;
3885 }
3886
3887 if (v_actual == ICE_MIN_MSIX ||
3888 v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3889 dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3890 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3891
3892 pf->num_rdma_msix = 0;
3893 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3894 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3895 (v_remain - v_rdma < v_rdma)) {
3896 /* Support minimum RDMA and give remaining
3897 * vectors to LAN MSIX
3898 */
3899 pf->num_rdma_msix = v_min_rdma;
3900 pf->num_lan_msix = v_remain - v_min_rdma;
3901 } else {
3902 /* Split remaining MSIX with RDMA after
3903 * accounting for AEQ MSIX
3904 */
3905 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3906 ICE_RDMA_NUM_AEQ_MSIX;
3907 pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3908 }
3909
3910 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3911 pf->num_lan_msix);
3912
3913 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3914 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3915 pf->num_rdma_msix);
3916 }
3917 }
3918
3919 return v_actual;
3920
3921 msix_err:
3922 devm_kfree(dev, pf->msix_entries);
3923 goto exit_err;
3924
3925 no_hw_vecs_left_err:
3926 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3927 needed, v_left);
3928 err = -ERANGE;
3929 exit_err:
3930 pf->num_rdma_msix = 0;
3931 pf->num_lan_msix = 0;
3932 return err;
3933 }
3934
3935 /**
3936 * ice_dis_msix - Disable MSI-X interrupt setup in OS
3937 * @pf: board private structure
3938 */
3939 static void ice_dis_msix(struct ice_pf *pf)
3940 {
3941 pci_disable_msix(pf->pdev);
3942 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3943 pf->msix_entries = NULL;
3944 }
3945
3946 /**
3947 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3948 * @pf: board private structure
3949 */
3950 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3951 {
3952 ice_dis_msix(pf);
3953
3954 if (pf->irq_tracker) {
3955 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3956 pf->irq_tracker = NULL;
3957 }
3958 }
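/* Editor's sketch: the degraded-allocation policy from ice_ena_msix_range()
 * above, extracted into a pure function for clarity. Given the vector count
 * actually granted (v_actual), the non-traffic overhead (v_other) and
 * whether RDMA is enabled, it computes the LAN/RDMA split the driver
 * settles on. This is a hypothetical helper that only mirrors the branches
 * above; it is not part of the driver.
 */
static void __maybe_unused
ice_msix_split_sketch(int v_actual, int v_other, bool rdma_ena,
		      int *lan, int *rdma)
{
	int v_remain = v_actual - v_other;
	int v_rdma = rdma_ena ? ICE_RDMA_NUM_AEQ_MSIX + 1 : 0;
	int v_min_rdma = rdma_ena ? ICE_MIN_RDMA_MSIX : 0;

	if (v_actual == ICE_MIN_MSIX ||
	    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
		/* too few vectors: drop RDMA, run LAN at the minimum */
		*rdma = 0;
		*lan = ICE_MIN_LAN_TXRX_MSIX;
	} else if (v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma ||
		   v_remain - v_rdma < v_rdma) {
		/* minimum RDMA, everything else goes to LAN */
		*rdma = v_min_rdma;
		*lan = v_remain - v_min_rdma;
	} else {
		/* split evenly after reserving the RDMA AEQ vectors */
		*rdma = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
			ICE_RDMA_NUM_AEQ_MSIX;
		*lan = v_remain - *rdma;
	}
}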
3959
3960 /**
3961 * ice_init_interrupt_scheme - Determine proper interrupt scheme
3962 * @pf: board private structure to initialize
3963 */
3964 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3965 {
3966 int vectors;
3967
3968 vectors = ice_ena_msix_range(pf);
3969
3970 if (vectors < 0)
3971 return vectors;
3972
3973 /* set up vector assignment tracking */
3974 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3975 struct_size(pf->irq_tracker, list, vectors),
3976 GFP_KERNEL);
3977 if (!pf->irq_tracker) {
3978 ice_dis_msix(pf);
3979 return -ENOMEM;
3980 }
3981
3982 /* populate the SW interrupt pool with the number of OS-granted IRQs */
3983 pf->num_avail_sw_msix = (u16)vectors;
3984 pf->irq_tracker->num_entries = (u16)vectors;
3985 pf->irq_tracker->end = pf->irq_tracker->num_entries;
3986
3987 return 0;
3988 }
3989
3990 /**
3991 * ice_is_wol_supported - check if WoL is supported
3992 * @hw: pointer to hardware info
3993 *
3994 * Check if WoL is supported based on the HW configuration.
3995 * Returns true if NVM supports and enables WoL for this port, false otherwise
3996 */
3997 bool ice_is_wol_supported(struct ice_hw *hw)
3998 {
3999 u16 wol_ctrl;
4000
4001 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4002 * word) indicates WoL is not supported on the corresponding PF ID.
4003 */
4004 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4005 return false;
4006
4007 return !(BIT(hw->port_info->lport) & wol_ctrl);
4008 }
4009
4010 /**
4011 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4012 * @vsi: VSI being changed
4013 * @new_rx: new number of Rx queues
4014 * @new_tx: new number of Tx queues
4015 *
4016 * Only change the number of queues if new_tx or new_rx is non-zero.
4017 *
4018 * Returns 0 on success, negative on failure.
4019 */
4020 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
4021 {
4022 struct ice_pf *pf = vsi->back;
4023 int err = 0, timeout = 50;
4024
4025 if (!new_rx && !new_tx)
4026 return -EINVAL;
4027
4028 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4029 timeout--;
4030 if (!timeout)
4031 return -EBUSY;
4032 usleep_range(1000, 2000);
4033 }
4034
4035 if (new_tx)
4036 vsi->req_txq = (u16)new_tx;
4037 if (new_rx)
4038 vsi->req_rxq = (u16)new_rx;
4039
4040 /* set for the next time the netdev is started */
4041 if (!netif_running(vsi->netdev)) {
4042 ice_vsi_rebuild(vsi, false);
4043 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4044 goto done;
4045 }
4046
4047 ice_vsi_close(vsi);
4048 ice_vsi_rebuild(vsi, false);
4049 ice_pf_dcb_recfg(pf);
4050 ice_vsi_open(vsi);
4051 done:
4052 clear_bit(ICE_CFG_BUSY, pf->state);
4053 return err;
4054 }
4055
4056 /**
4057 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4058 * @pf: PF to configure
4059 *
4060 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4061 * VSI can still Tx/Rx VLAN tagged packets.
4062 */ 4063 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 4064 { 4065 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4066 struct ice_vsi_ctx *ctxt; 4067 struct ice_hw *hw; 4068 int status; 4069 4070 if (!vsi) 4071 return; 4072 4073 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 4074 if (!ctxt) 4075 return; 4076 4077 hw = &pf->hw; 4078 ctxt->info = vsi->info; 4079 4080 ctxt->info.valid_sections = 4081 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 4082 ICE_AQ_VSI_PROP_SECURITY_VALID | 4083 ICE_AQ_VSI_PROP_SW_VALID); 4084 4085 /* disable VLAN anti-spoof */ 4086 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4087 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4088 4089 /* disable VLAN pruning and keep all other settings */ 4090 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4091 4092 /* allow all VLANs on Tx and don't strip on Rx */ 4093 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | 4094 ICE_AQ_VSI_VLAN_EMOD_NOTHING; 4095 4096 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 4097 if (status) { 4098 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", 4099 status, ice_aq_str(hw->adminq.sq_last_status)); 4100 } else { 4101 vsi->info.sec_flags = ctxt->info.sec_flags; 4102 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 4103 vsi->info.vlan_flags = ctxt->info.vlan_flags; 4104 } 4105 4106 kfree(ctxt); 4107 } 4108 4109 /** 4110 * ice_log_pkg_init - log result of DDP package load 4111 * @hw: pointer to hardware info 4112 * @state: state of package load 4113 */ 4114 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) 4115 { 4116 struct ice_pf *pf = hw->back; 4117 struct device *dev; 4118 4119 dev = ice_pf_to_dev(pf); 4120 4121 switch (state) { 4122 case ICE_DDP_PKG_SUCCESS: 4123 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 4124 hw->active_pkg_name, 4125 hw->active_pkg_ver.major, 4126 hw->active_pkg_ver.minor, 4127 hw->active_pkg_ver.update, 4128 hw->active_pkg_ver.draft); 4129 break; 4130 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: 4131 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 4132 hw->active_pkg_name, 4133 hw->active_pkg_ver.major, 4134 hw->active_pkg_ver.minor, 4135 hw->active_pkg_ver.update, 4136 hw->active_pkg_ver.draft); 4137 break; 4138 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: 4139 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 4140 hw->active_pkg_name, 4141 hw->active_pkg_ver.major, 4142 hw->active_pkg_ver.minor, 4143 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4144 break; 4145 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: 4146 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 4147 hw->active_pkg_name, 4148 hw->active_pkg_ver.major, 4149 hw->active_pkg_ver.minor, 4150 hw->active_pkg_ver.update, 4151 hw->active_pkg_ver.draft, 4152 hw->pkg_name, 4153 hw->pkg_ver.major, 4154 hw->pkg_ver.minor, 4155 hw->pkg_ver.update, 4156 hw->pkg_ver.draft); 4157 break; 4158 case ICE_DDP_PKG_FW_MISMATCH: 4159 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. 
Entering Safe Mode.\n");
4160 break;
4161 case ICE_DDP_PKG_INVALID_FILE:
4162 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4163 break;
4164 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4165 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4166 break;
4167 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4168 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4169 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4170 break;
4171 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4172 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4173 break;
4174 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4175 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4176 break;
4177 case ICE_DDP_PKG_LOAD_ERROR:
4178 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4179 /* poll for reset to complete */
4180 if (ice_check_reset(hw))
4181 dev_err(dev, "Error resetting device. Please reload the driver\n");
4182 break;
4183 case ICE_DDP_PKG_ERR:
4184 default:
4185 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4186 break;
4187 }
4188 }
4189
4190 /**
4191 * ice_load_pkg - load/reload the DDP Package file
4192 * @firmware: firmware structure when firmware requested or NULL for reload
4193 * @pf: pointer to the PF instance
4194 *
4195 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4196 * initialize HW tables.
4197 */
4198 static void
4199 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4200 {
4201 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4202 struct device *dev = ice_pf_to_dev(pf);
4203 struct ice_hw *hw = &pf->hw;
4204
4205 /* Load DDP Package */
4206 if (firmware && !hw->pkg_copy) {
4207 state = ice_copy_and_init_pkg(hw, firmware->data,
4208 firmware->size);
4209 ice_log_pkg_init(hw, state);
4210 } else if (!firmware && hw->pkg_copy) {
4211 /* Reload package during rebuild after CORER/GLOBR reset */
4212 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4213 ice_log_pkg_init(hw, state);
4214 } else {
4215 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4216 }
4217
4218 if (!ice_is_init_pkg_successful(state)) {
4219 /* Safe Mode */
4220 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4221 return;
4222 }
4223
4224 /* Successfully downloading the package is the precondition for advanced
4225 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4226 */
4227 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4228 }
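/* Editor's summary of the ice_load_pkg() decision above, derived from its
 * branches rather than from separate documentation:
 *
 *	firmware != NULL && !hw->pkg_copy  ->  first-time load (probe path)
 *	firmware == NULL && hw->pkg_copy   ->  reload cached copy (CORER/GLOBR)
 *	anything else                      ->  no package; stay in Safe Mode
 *
 * Only a successful load sets ICE_FLAG_ADV_FEATURES; every other outcome
 * leaves the PF in Safe Mode with basic LAN functionality only.
 */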
4229
4230 /**
4231 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4232 * @pf: pointer to the PF structure
4233 *
4234 * There is no error returned here because the driver should be able to handle
4235 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4236 * specifically with Tx.
4237 */
4238 static void ice_verify_cacheline_size(struct ice_pf *pf)
4239 {
4240 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4241 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4242 ICE_CACHE_LINE_BYTES);
4243 }
4244
4245 /**
4246 * ice_send_version - update firmware with driver version
4247 * @pf: PF struct
4248 *
4249 * Returns 0 on success, else error code
4250 */
4251 static int ice_send_version(struct ice_pf *pf)
4252 {
4253 struct ice_driver_ver dv;
4254
4255 dv.major_ver = 0xff;
4256 dv.minor_ver = 0xff;
4257 dv.build_ver = 0xff;
4258 dv.subbuild_ver = 0;
4259 strscpy((char *)dv.driver_string, UTS_RELEASE,
4260 sizeof(dv.driver_string));
4261 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4262 }
4263
4264 /**
4265 * ice_init_fdir - Initialize flow director VSI and configuration
4266 * @pf: pointer to the PF instance
4267 *
4268 * returns 0 on success, negative on error
4269 */
4270 static int ice_init_fdir(struct ice_pf *pf)
4271 {
4272 struct device *dev = ice_pf_to_dev(pf);
4273 struct ice_vsi *ctrl_vsi;
4274 int err;
4275
4276 /* Side Band Flow Director needs to have a control VSI.
4277 * Allocate it and store it in the PF.
4278 */
4279 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4280 if (!ctrl_vsi) {
4281 dev_dbg(dev, "could not create control VSI\n");
4282 return -ENOMEM;
4283 }
4284
4285 err = ice_vsi_open_ctrl(ctrl_vsi);
4286 if (err) {
4287 dev_dbg(dev, "could not open control VSI\n");
4288 goto err_vsi_open;
4289 }
4290
4291 mutex_init(&pf->hw.fdir_fltr_lock);
4292
4293 err = ice_fdir_create_dflt_rules(pf);
4294 if (err)
4295 goto err_fdir_rule;
4296
4297 return 0;
4298
4299 err_fdir_rule:
4300 ice_fdir_release_flows(&pf->hw);
4301 ice_vsi_close(ctrl_vsi);
4302 err_vsi_open:
4303 ice_vsi_release(ctrl_vsi);
4304 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4305 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4306 pf->ctrl_vsi_idx = ICE_NO_VSI;
4307 }
4308 return err;
4309 }
4310
4311 /**
4312 * ice_get_opt_fw_name - return optional firmware file name or NULL
4313 * @pf: pointer to the PF instance
4314 */
4315 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4316 {
4317 /* Optional firmware name same as default with additional dash
4318 * followed by an EUI-64 identifier (PCIe Device Serial Number)
4319 */
4320 struct pci_dev *pdev = pf->pdev;
4321 char *opt_fw_filename;
4322 u64 dsn;
4323
4324 /* Determine the name of the optional file using the DSN (two
4325 * dwords following the start of the DSN Capability).
4326 */
4327 dsn = pci_get_dsn(pdev);
4328 if (!dsn)
4329 return NULL;
4330
4331 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4332 if (!opt_fw_filename)
4333 return NULL;
4334
4335 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4336 ICE_DDP_PKG_PATH, dsn);
4337
4338 return opt_fw_filename;
4339 }
4340
4341 /**
4342 * ice_request_fw - request and load the DDP package file
4343 * @pf: pointer to the PF instance
4344 */
4345 static void ice_request_fw(struct ice_pf *pf)
4346 {
4347 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4348 const struct firmware *firmware = NULL;
4349 struct device *dev = ice_pf_to_dev(pf);
4350 int err = 0;
4351
4352 /* optional device-specific DDP (if present) overrides the default DDP
4353 * package file. The kernel logs a debug message if the file doesn't
4354 * exist, and warning messages for other errors.
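 *
 * Editor's example: for a hypothetical device serial number of
 * 0x0011223344556677, ice_get_opt_fw_name() above would produce
 * "intel/ice/ddp/ice-0011223344556677.pkg", i.e. ICE_DDP_PKG_PATH plus
 * "ice-%016llx.pkg" with the DSN zero-padded to 16 hex digits.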
4355 */ 4356 if (opt_fw_filename) { 4357 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); 4358 if (err) { 4359 kfree(opt_fw_filename); 4360 goto dflt_pkg_load; 4361 } 4362 4363 /* request for firmware was successful. Download to device */ 4364 ice_load_pkg(firmware, pf); 4365 kfree(opt_fw_filename); 4366 release_firmware(firmware); 4367 return; 4368 } 4369 4370 dflt_pkg_load: 4371 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); 4372 if (err) { 4373 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); 4374 return; 4375 } 4376 4377 /* request for firmware was successful. Download to device */ 4378 ice_load_pkg(firmware, pf); 4379 release_firmware(firmware); 4380 } 4381 4382 /** 4383 * ice_print_wake_reason - show the wake up cause in the log 4384 * @pf: pointer to the PF struct 4385 */ 4386 static void ice_print_wake_reason(struct ice_pf *pf) 4387 { 4388 u32 wus = pf->wakeup_reason; 4389 const char *wake_str; 4390 4391 /* if no wake event, nothing to print */ 4392 if (!wus) 4393 return; 4394 4395 if (wus & PFPM_WUS_LNKC_M) 4396 wake_str = "Link\n"; 4397 else if (wus & PFPM_WUS_MAG_M) 4398 wake_str = "Magic Packet\n"; 4399 else if (wus & PFPM_WUS_MNG_M) 4400 wake_str = "Management\n"; 4401 else if (wus & PFPM_WUS_FW_RST_WK_M) 4402 wake_str = "Firmware Reset\n"; 4403 else 4404 wake_str = "Unknown\n"; 4405 4406 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 4407 } 4408 4409 /** 4410 * ice_register_netdev - register netdev and devlink port 4411 * @pf: pointer to the PF struct 4412 */ 4413 static int ice_register_netdev(struct ice_pf *pf) 4414 { 4415 struct ice_vsi *vsi; 4416 int err = 0; 4417 4418 vsi = ice_get_main_vsi(pf); 4419 if (!vsi || !vsi->netdev) 4420 return -EIO; 4421 4422 err = register_netdev(vsi->netdev); 4423 if (err) 4424 goto err_register_netdev; 4425 4426 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4427 netif_carrier_off(vsi->netdev); 4428 netif_tx_stop_all_queues(vsi->netdev); 4429 err = ice_devlink_create_pf_port(pf); 4430 if (err) 4431 goto err_devlink_create; 4432 4433 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); 4434 4435 return 0; 4436 err_devlink_create: 4437 unregister_netdev(vsi->netdev); 4438 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4439 err_register_netdev: 4440 free_netdev(vsi->netdev); 4441 vsi->netdev = NULL; 4442 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4443 return err; 4444 } 4445 4446 /** 4447 * ice_probe - Device initialization routine 4448 * @pdev: PCI device information struct 4449 * @ent: entry in ice_pci_tbl 4450 * 4451 * Returns 0 on success, negative on failure 4452 */ 4453 static int 4454 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 4455 { 4456 struct device *dev = &pdev->dev; 4457 struct ice_pf *pf; 4458 struct ice_hw *hw; 4459 int i, err; 4460 4461 if (pdev->is_virtfn) { 4462 dev_err(dev, "can't probe a virtual function\n"); 4463 return -EINVAL; 4464 } 4465 4466 /* this driver uses devres, see 4467 * Documentation/driver-api/driver-model/devres.rst 4468 */ 4469 err = pcim_enable_device(pdev); 4470 if (err) 4471 return err; 4472 4473 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 4474 if (err) { 4475 dev_err(dev, "BAR0 I/O map error %d\n", err); 4476 return err; 4477 } 4478 4479 pf = ice_allocate_pf(dev); 4480 if (!pf) 4481 return -ENOMEM; 4482 4483 /* initialize Auxiliary index to invalid value */ 4484 pf->aux_idx = -1; 4485 4486 /* set up for high or low DMA */ 4487 err = 
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4488 if (err)
4489 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4490 if (err) {
4491 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4492 return err;
4493 }
4494
4495 pci_enable_pcie_error_reporting(pdev);
4496 pci_set_master(pdev);
4497
4498 pf->pdev = pdev;
4499 pci_set_drvdata(pdev, pf);
4500 set_bit(ICE_DOWN, pf->state);
4501 /* Disable service task until DOWN bit is cleared */
4502 set_bit(ICE_SERVICE_DIS, pf->state);
4503
4504 hw = &pf->hw;
4505 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4506 pci_save_state(pdev);
4507
4508 hw->back = pf;
4509 hw->vendor_id = pdev->vendor;
4510 hw->device_id = pdev->device;
4511 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4512 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4513 hw->subsystem_device_id = pdev->subsystem_device;
4514 hw->bus.device = PCI_SLOT(pdev->devfn);
4515 hw->bus.func = PCI_FUNC(pdev->devfn);
4516 ice_set_ctrlq_len(hw);
4517
4518 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4519
4520 #ifndef CONFIG_DYNAMIC_DEBUG
4521 if (debug < -1)
4522 hw->debug_mask = debug;
4523 #endif
4524
4525 err = ice_init_hw(hw);
4526 if (err) {
4527 dev_err(dev, "ice_init_hw failed: %d\n", err);
4528 err = -EIO;
4529 goto err_exit_unroll;
4530 }
4531
4532 ice_init_feature_support(pf);
4533
4534 ice_request_fw(pf);
4535
4536 /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4537 * set in pf->flags, which will cause ice_is_safe_mode to return
4538 * true
4539 */
4540 if (ice_is_safe_mode(pf)) {
4541 /* we already got function/device capabilities but these don't
4542 * reflect what the driver needs to do in safe mode. Instead of
4543 * adding conditional logic everywhere to ignore these
4544 * device/function capabilities, override them.
4545 */ 4546 ice_set_safe_mode_caps(hw); 4547 } 4548 4549 err = ice_init_pf(pf); 4550 if (err) { 4551 dev_err(dev, "ice_init_pf failed: %d\n", err); 4552 goto err_init_pf_unroll; 4553 } 4554 4555 ice_devlink_init_regions(pf); 4556 4557 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; 4558 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; 4559 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; 4560 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; 4561 i = 0; 4562 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 4563 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4564 pf->hw.tnl.valid_count[TNL_VXLAN]; 4565 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4566 UDP_TUNNEL_TYPE_VXLAN; 4567 i++; 4568 } 4569 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { 4570 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4571 pf->hw.tnl.valid_count[TNL_GENEVE]; 4572 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4573 UDP_TUNNEL_TYPE_GENEVE; 4574 i++; 4575 } 4576 4577 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 4578 if (!pf->num_alloc_vsi) { 4579 err = -EIO; 4580 goto err_init_pf_unroll; 4581 } 4582 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 4583 dev_warn(&pf->pdev->dev, 4584 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 4585 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 4586 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 4587 } 4588 4589 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 4590 GFP_KERNEL); 4591 if (!pf->vsi) { 4592 err = -ENOMEM; 4593 goto err_init_pf_unroll; 4594 } 4595 4596 err = ice_init_interrupt_scheme(pf); 4597 if (err) { 4598 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 4599 err = -EIO; 4600 goto err_init_vsi_unroll; 4601 } 4602 4603 /* In case of MSIX we are going to setup the misc vector right here 4604 * to handle admin queue events etc. In case of legacy and MSI 4605 * the misc functionality and queue processing is combined in 4606 * the same vector and that gets setup at open. 4607 */ 4608 err = ice_req_irq_msix_misc(pf); 4609 if (err) { 4610 dev_err(dev, "setup of misc vector failed: %d\n", err); 4611 goto err_init_interrupt_unroll; 4612 } 4613 4614 /* create switch struct for the switch element created by FW on boot */ 4615 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); 4616 if (!pf->first_sw) { 4617 err = -ENOMEM; 4618 goto err_msix_misc_unroll; 4619 } 4620 4621 if (hw->evb_veb) 4622 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4623 else 4624 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4625 4626 pf->first_sw->pf = pf; 4627 4628 /* record the sw_id available for later use */ 4629 pf->first_sw->sw_id = hw->port_info->sw_id; 4630 4631 err = ice_setup_pf_sw(pf); 4632 if (err) { 4633 dev_err(dev, "probe failed due to setup PF switch: %d\n", err); 4634 goto err_alloc_sw_unroll; 4635 } 4636 4637 clear_bit(ICE_SERVICE_DIS, pf->state); 4638 4639 /* tell the firmware we are up */ 4640 err = ice_send_version(pf); 4641 if (err) { 4642 dev_err(dev, "probe failed sending driver version %s. 
error: %d\n", 4643 UTS_RELEASE, err); 4644 goto err_send_version_unroll; 4645 } 4646 4647 /* since everything is good, start the service timer */ 4648 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4649 4650 err = ice_init_link_events(pf->hw.port_info); 4651 if (err) { 4652 dev_err(dev, "ice_init_link_events failed: %d\n", err); 4653 goto err_send_version_unroll; 4654 } 4655 4656 /* not a fatal error if this fails */ 4657 err = ice_init_nvm_phy_type(pf->hw.port_info); 4658 if (err) 4659 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 4660 4661 /* not a fatal error if this fails */ 4662 err = ice_update_link_info(pf->hw.port_info); 4663 if (err) 4664 dev_err(dev, "ice_update_link_info failed: %d\n", err); 4665 4666 ice_init_link_dflt_override(pf->hw.port_info); 4667 4668 ice_check_link_cfg_err(pf, 4669 pf->hw.port_info->phy.link_info.link_cfg_err); 4670 4671 /* if media available, initialize PHY settings */ 4672 if (pf->hw.port_info->phy.link_info.link_info & 4673 ICE_AQ_MEDIA_AVAILABLE) { 4674 /* not a fatal error if this fails */ 4675 err = ice_init_phy_user_cfg(pf->hw.port_info); 4676 if (err) 4677 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 4678 4679 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 4680 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4681 4682 if (vsi) 4683 ice_configure_phy(vsi); 4684 } 4685 } else { 4686 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 4687 } 4688 4689 ice_verify_cacheline_size(pf); 4690 4691 /* Save wakeup reason register for later use */ 4692 pf->wakeup_reason = rd32(hw, PFPM_WUS); 4693 4694 /* check for a power management event */ 4695 ice_print_wake_reason(pf); 4696 4697 /* clear wake status, all bits */ 4698 wr32(hw, PFPM_WUS, U32_MAX); 4699 4700 /* Disable WoL at init, wait for user to enable */ 4701 device_set_wakeup_enable(dev, false); 4702 4703 if (ice_is_safe_mode(pf)) { 4704 ice_set_safe_mode_vlan_cfg(pf); 4705 goto probe_done; 4706 } 4707 4708 /* initialize DDP driven features */ 4709 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4710 ice_ptp_init(pf); 4711 4712 /* Note: Flow director init failure is non-fatal to load */ 4713 if (ice_init_fdir(pf)) 4714 dev_err(dev, "could not initialize flow director\n"); 4715 4716 /* Note: DCB init failure is non-fatal to load */ 4717 if (ice_init_pf_dcb(pf, false)) { 4718 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4719 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 4720 } else { 4721 ice_cfg_lldp_mib_change(&pf->hw, true); 4722 } 4723 4724 if (ice_init_lag(pf)) 4725 dev_warn(dev, "Failed to init link aggregation support\n"); 4726 4727 /* print PCI link speed and width */ 4728 pcie_print_link_status(pf->pdev); 4729 4730 probe_done: 4731 err = ice_register_netdev(pf); 4732 if (err) 4733 goto err_netdev_reg; 4734 4735 err = ice_devlink_register_params(pf); 4736 if (err) 4737 goto err_netdev_reg; 4738 4739 /* ready to go, so clear down state bit */ 4740 clear_bit(ICE_DOWN, pf->state); 4741 if (ice_is_aux_ena(pf)) { 4742 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); 4743 if (pf->aux_idx < 0) { 4744 dev_err(dev, "Failed to allocate device ID for AUX driver\n"); 4745 err = -ENOMEM; 4746 goto err_devlink_reg_param; 4747 } 4748 4749 err = ice_init_rdma(pf); 4750 if (err) { 4751 dev_err(dev, "Failed to initialize RDMA: %d\n", err); 4752 err = -EIO; 4753 goto err_init_aux_unroll; 4754 } 4755 } else { 4756 dev_warn(dev, "RDMA is not supported on this device\n"); 4757 } 4758 4759 ice_devlink_register(pf); 4760 return 0; 4761 4762 err_init_aux_unroll: 4763 pf->adev = 
NULL; 4764 ida_free(&ice_aux_ida, pf->aux_idx); 4765 err_devlink_reg_param: 4766 ice_devlink_unregister_params(pf); 4767 err_netdev_reg: 4768 err_send_version_unroll: 4769 ice_vsi_release_all(pf); 4770 err_alloc_sw_unroll: 4771 set_bit(ICE_SERVICE_DIS, pf->state); 4772 set_bit(ICE_DOWN, pf->state); 4773 devm_kfree(dev, pf->first_sw); 4774 err_msix_misc_unroll: 4775 ice_free_irq_msix_misc(pf); 4776 err_init_interrupt_unroll: 4777 ice_clear_interrupt_scheme(pf); 4778 err_init_vsi_unroll: 4779 devm_kfree(dev, pf->vsi); 4780 err_init_pf_unroll: 4781 ice_deinit_pf(pf); 4782 ice_devlink_destroy_regions(pf); 4783 ice_deinit_hw(hw); 4784 err_exit_unroll: 4785 pci_disable_pcie_error_reporting(pdev); 4786 pci_disable_device(pdev); 4787 return err; 4788 } 4789 4790 /** 4791 * ice_set_wake - enable or disable Wake on LAN 4792 * @pf: pointer to the PF struct 4793 * 4794 * Simple helper for WoL control 4795 */ 4796 static void ice_set_wake(struct ice_pf *pf) 4797 { 4798 struct ice_hw *hw = &pf->hw; 4799 bool wol = pf->wol_ena; 4800 4801 /* clear wake state, otherwise new wake events won't fire */ 4802 wr32(hw, PFPM_WUS, U32_MAX); 4803 4804 /* enable / disable APM wake up, no RMW needed */ 4805 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 4806 4807 /* set magic packet filter enabled */ 4808 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 4809 } 4810 4811 /** 4812 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 4813 * @pf: pointer to the PF struct 4814 * 4815 * Issue firmware command to enable multicast magic wake, making 4816 * sure that any locally administered address (LAA) is used for 4817 * wake, and that PF reset doesn't undo the LAA. 4818 */ 4819 static void ice_setup_mc_magic_wake(struct ice_pf *pf) 4820 { 4821 struct device *dev = ice_pf_to_dev(pf); 4822 struct ice_hw *hw = &pf->hw; 4823 u8 mac_addr[ETH_ALEN]; 4824 struct ice_vsi *vsi; 4825 int status; 4826 u8 flags; 4827 4828 if (!pf->wol_ena) 4829 return; 4830 4831 vsi = ice_get_main_vsi(pf); 4832 if (!vsi) 4833 return; 4834 4835 /* Get current MAC address in case it's an LAA */ 4836 if (vsi->netdev) 4837 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 4838 else 4839 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 4840 4841 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 4842 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 4843 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 4844 4845 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 4846 if (status) 4847 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", 4848 status, ice_aq_str(hw->adminq.sq_last_status)); 4849 } 4850 4851 /** 4852 * ice_remove - Device removal routine 4853 * @pdev: PCI device information struct 4854 */ 4855 static void ice_remove(struct pci_dev *pdev) 4856 { 4857 struct ice_pf *pf = pci_get_drvdata(pdev); 4858 int i; 4859 4860 ice_devlink_unregister(pf); 4861 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 4862 if (!ice_is_reset_in_progress(pf->state)) 4863 break; 4864 msleep(100); 4865 } 4866 4867 ice_tc_indir_block_remove(pf); 4868 4869 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { 4870 set_bit(ICE_VF_RESETS_DISABLED, pf->state); 4871 ice_free_vfs(pf); 4872 } 4873 4874 ice_service_task_stop(pf); 4875 4876 ice_aq_cancel_waiting_tasks(pf); 4877 ice_unplug_aux_dev(pf); 4878 if (pf->aux_idx >= 0) 4879 ida_free(&ice_aux_ida, pf->aux_idx); 4880 ice_devlink_unregister_params(pf); 4881 set_bit(ICE_DOWN, pf->state); 4882 4883 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); 4884 ice_deinit_lag(pf); 4885 if (test_bit(ICE_FLAG_PTP_SUPPORTED, 
pf->flags))
4886 ice_ptp_release(pf);
4887 if (!ice_is_safe_mode(pf))
4888 ice_remove_arfs(pf);
4889 ice_setup_mc_magic_wake(pf);
4890 ice_vsi_release_all(pf);
4891 ice_set_wake(pf);
4892 ice_free_irq_msix_misc(pf);
4893 ice_for_each_vsi(pf, i) {
4894 if (!pf->vsi[i])
4895 continue;
4896 ice_vsi_free_q_vectors(pf->vsi[i]);
4897 }
4898 ice_deinit_pf(pf);
4899 ice_devlink_destroy_regions(pf);
4900 ice_deinit_hw(&pf->hw);
4901
4902 /* Issue a PFR as part of the prescribed driver unload flow. Do not
4903 * do it via ice_schedule_reset() since there is no need to rebuild
4904 * and the service task is already stopped.
4905 */
4906 ice_reset(&pf->hw, ICE_RESET_PFR);
4907 pci_wait_for_pending_transaction(pdev);
4908 ice_clear_interrupt_scheme(pf);
4909 pci_disable_pcie_error_reporting(pdev);
4910 pci_disable_device(pdev);
4911 }
4912
4913 /**
4914 * ice_shutdown - PCI callback for shutting down device
4915 * @pdev: PCI device information struct
4916 */
4917 static void ice_shutdown(struct pci_dev *pdev)
4918 {
4919 struct ice_pf *pf = pci_get_drvdata(pdev);
4920
4921 ice_remove(pdev);
4922
4923 if (system_state == SYSTEM_POWER_OFF) {
4924 pci_wake_from_d3(pdev, pf->wol_ena);
4925 pci_set_power_state(pdev, PCI_D3hot);
4926 }
4927 }
4928
4929 #ifdef CONFIG_PM
4930 /**
4931 * ice_prepare_for_shutdown - prep for PCI shutdown
4932 * @pf: board private structure
4933 *
4934 * Inform or close all dependent features in prep for PCI device shutdown
4935 */
4936 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4937 {
4938 struct ice_hw *hw = &pf->hw;
4939 u32 v;
4940
4941 /* Notify VFs of impending reset */
4942 if (ice_check_sq_alive(hw, &hw->mailboxq))
4943 ice_vc_notify_reset(pf);
4944
4945 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4946
4947 /* disable the VSIs and their queues that are not already DOWN */
4948 ice_pf_dis_all_vsi(pf, false);
4949
4950 ice_for_each_vsi(pf, v)
4951 if (pf->vsi[v])
4952 pf->vsi[v]->vsi_num = 0;
4953
4954 ice_shutdown_all_ctrlq(hw);
4955 }
4956
4957 /**
4958 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4959 * @pf: board private structure to reinitialize
4960 *
4961 * This routine reinitializes the interrupt scheme that was cleared during
4962 * the power management suspend callback.
4963 *
4964 * This should be called during the resume routine to re-allocate the
4965 * q_vectors and reacquire interrupts.
4966 */
4967 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4968 {
4969 struct device *dev = ice_pf_to_dev(pf);
4970 int ret, v;
4971
4972 /* Since we clear MSIX flag during suspend, we need to
4973 * set it back during resume...
4974 */
4975
4976 ret = ice_init_interrupt_scheme(pf);
4977 if (ret) {
4978 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4979 return ret;
4980 }
4981
4982 /* Remap vectors and rings, after successful re-init interrupts */
4983 ice_for_each_vsi(pf, v) {
4984 if (!pf->vsi[v])
4985 continue;
4986
4987 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4988 if (ret)
4989 goto err_reinit;
4990 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4991 }
4992
4993 ret = ice_req_irq_msix_misc(pf);
4994 if (ret) {
4995 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4996 ret);
4997 goto err_reinit;
4998 }
4999
5000 return 0;
5001
5002 err_reinit:
5003 while (v--)
5004 if (pf->vsi[v])
5005 ice_vsi_free_q_vectors(pf->vsi[v]);
5006
5007 return ret;
5008 }
5009
5010 /**
5011 * ice_suspend - PM callback for suspending the device
5012 * @dev: generic device information structure
5013 *
5014 * Power Management callback to quiesce the device and prepare
5015 * for D3 transition.
5016 */
5017 static int __maybe_unused ice_suspend(struct device *dev)
5018 {
5019 struct pci_dev *pdev = to_pci_dev(dev);
5020 struct ice_pf *pf;
5021 int disabled, v;
5022
5023 pf = pci_get_drvdata(pdev);
5024
5025 if (!ice_pf_state_is_nominal(pf)) {
5026 dev_err(dev, "Device is not ready, no need to suspend it\n");
5027 return -EBUSY;
5028 }
5029
5030 /* Stop watchdog tasks until resume completion.
5031 * Even though it is most likely that the service task is
5032 * disabled if the device is suspended or down, the service task's
5033 * state is controlled by a different state bit, and we should
5034 * store and honor whatever state that bit is in at this point.
5035 */
5036 disabled = ice_service_task_stop(pf);
5037
5038 ice_unplug_aux_dev(pf);
5039
5040 /* Already suspended? Then there is nothing to do */
5041 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5042 if (!disabled)
5043 ice_service_task_restart(pf);
5044 return 0;
5045 }
5046
5047 if (test_bit(ICE_DOWN, pf->state) ||
5048 ice_is_reset_in_progress(pf->state)) {
5049 dev_err(dev, "can't suspend device in reset or already down\n");
5050 if (!disabled)
5051 ice_service_task_restart(pf);
5052 return 0;
5053 }
5054
5055 ice_setup_mc_magic_wake(pf);
5056
5057 ice_prepare_for_shutdown(pf);
5058
5059 ice_set_wake(pf);
5060
5061 /* Free vectors, clear the interrupt scheme and release IRQs
5062 * for proper hibernation, especially with a large number of CPUs.
5063 * Otherwise hibernation might fail when mapping all the vectors back
5064 * to CPU0.
5065 */ 5066 ice_free_irq_msix_misc(pf); 5067 ice_for_each_vsi(pf, v) { 5068 if (!pf->vsi[v]) 5069 continue; 5070 ice_vsi_free_q_vectors(pf->vsi[v]); 5071 } 5072 ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); 5073 ice_clear_interrupt_scheme(pf); 5074 5075 pci_save_state(pdev); 5076 pci_wake_from_d3(pdev, pf->wol_ena); 5077 pci_set_power_state(pdev, PCI_D3hot); 5078 return 0; 5079 } 5080 5081 /** 5082 * ice_resume - PM callback for waking up from D3 5083 * @dev: generic device information structure 5084 */ 5085 static int __maybe_unused ice_resume(struct device *dev) 5086 { 5087 struct pci_dev *pdev = to_pci_dev(dev); 5088 enum ice_reset_req reset_type; 5089 struct ice_pf *pf; 5090 struct ice_hw *hw; 5091 int ret; 5092 5093 pci_set_power_state(pdev, PCI_D0); 5094 pci_restore_state(pdev); 5095 pci_save_state(pdev); 5096 5097 if (!pci_device_is_present(pdev)) 5098 return -ENODEV; 5099 5100 ret = pci_enable_device_mem(pdev); 5101 if (ret) { 5102 dev_err(dev, "Cannot enable device after suspend\n"); 5103 return ret; 5104 } 5105 5106 pf = pci_get_drvdata(pdev); 5107 hw = &pf->hw; 5108 5109 pf->wakeup_reason = rd32(hw, PFPM_WUS); 5110 ice_print_wake_reason(pf); 5111 5112 /* We cleared the interrupt scheme when we suspended, so we need to 5113 * restore it now to resume device functionality. 5114 */ 5115 ret = ice_reinit_interrupt_scheme(pf); 5116 if (ret) 5117 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); 5118 5119 clear_bit(ICE_DOWN, pf->state); 5120 /* Now perform PF reset and rebuild */ 5121 reset_type = ICE_RESET_PFR; 5122 /* re-enable service task for reset, but allow reset to schedule it */ 5123 clear_bit(ICE_SERVICE_DIS, pf->state); 5124 5125 if (ice_schedule_reset(pf, reset_type)) 5126 dev_err(dev, "Reset during resume failed.\n"); 5127 5128 clear_bit(ICE_SUSPENDED, pf->state); 5129 ice_service_task_restart(pf); 5130 5131 /* Restart the service task */ 5132 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5133 5134 return 0; 5135 } 5136 #endif /* CONFIG_PM */ 5137 5138 /** 5139 * ice_pci_err_detected - warning that PCI error has been detected 5140 * @pdev: PCI device information struct 5141 * @err: the type of PCI error 5142 * 5143 * Called to warn that something happened on the PCI bus and the error handling 5144 * is in progress. Allows the driver to gracefully prepare/handle PCI errors. 5145 */ 5146 static pci_ers_result_t 5147 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) 5148 { 5149 struct ice_pf *pf = pci_get_drvdata(pdev); 5150 5151 if (!pf) { 5152 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 5153 __func__, err); 5154 return PCI_ERS_RESULT_DISCONNECT; 5155 } 5156 5157 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5158 ice_service_task_stop(pf); 5159 5160 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5161 set_bit(ICE_PFR_REQ, pf->state); 5162 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5163 } 5164 } 5165 5166 return PCI_ERS_RESULT_NEED_RESET; 5167 } 5168 5169 /** 5170 * ice_pci_err_slot_reset - a PCI slot reset has just happened 5171 * @pdev: PCI device information struct 5172 * 5173 * Called to determine if the driver can recover from the PCI slot reset by 5174 * using a register read to determine if the device is recoverable. 
5175 */ 5176 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 5177 { 5178 struct ice_pf *pf = pci_get_drvdata(pdev); 5179 pci_ers_result_t result; 5180 int err; 5181 u32 reg; 5182 5183 err = pci_enable_device_mem(pdev); 5184 if (err) { 5185 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 5186 err); 5187 result = PCI_ERS_RESULT_DISCONNECT; 5188 } else { 5189 pci_set_master(pdev); 5190 pci_restore_state(pdev); 5191 pci_save_state(pdev); 5192 pci_wake_from_d3(pdev, false); 5193 5194 /* Check for life */ 5195 reg = rd32(&pf->hw, GLGEN_RTRIG); 5196 if (!reg) 5197 result = PCI_ERS_RESULT_RECOVERED; 5198 else 5199 result = PCI_ERS_RESULT_DISCONNECT; 5200 } 5201 5202 err = pci_aer_clear_nonfatal_status(pdev); 5203 if (err) 5204 dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n", 5205 err); 5206 /* non-fatal, continue */ 5207 5208 return result; 5209 } 5210 5211 /** 5212 * ice_pci_err_resume - restart operations after PCI error recovery 5213 * @pdev: PCI device information struct 5214 * 5215 * Called to allow the driver to bring things back up after PCI error and/or 5216 * reset recovery have finished 5217 */ 5218 static void ice_pci_err_resume(struct pci_dev *pdev) 5219 { 5220 struct ice_pf *pf = pci_get_drvdata(pdev); 5221 5222 if (!pf) { 5223 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 5224 __func__); 5225 return; 5226 } 5227 5228 if (test_bit(ICE_SUSPENDED, pf->state)) { 5229 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 5230 __func__); 5231 return; 5232 } 5233 5234 ice_restore_all_vfs_msi_state(pdev); 5235 5236 ice_do_reset(pf, ICE_RESET_PFR); 5237 ice_service_task_restart(pf); 5238 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5239 } 5240 5241 /** 5242 * ice_pci_err_reset_prepare - prepare device driver for PCI reset 5243 * @pdev: PCI device information struct 5244 */ 5245 static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 5246 { 5247 struct ice_pf *pf = pci_get_drvdata(pdev); 5248 5249 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5250 ice_service_task_stop(pf); 5251 5252 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5253 set_bit(ICE_PFR_REQ, pf->state); 5254 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5255 } 5256 } 5257 } 5258 5259 /** 5260 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 5261 * @pdev: PCI device information struct 5262 */ 5263 static void ice_pci_err_reset_done(struct pci_dev *pdev) 5264 { 5265 ice_pci_err_resume(pdev); 5266 } 5267 5268 /* ice_pci_tbl - PCI Device ID Table 5269 * 5270 * Wildcard entries (PCI_ANY_ID) should come last 5271 * Last entry must be all 0s 5272 * 5273 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 5274 * Class, Class Mask, private data (not used) } 5275 */ 5276 static const struct pci_device_id ice_pci_tbl[] = { 5277 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 5278 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 5279 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 5280 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 }, 5281 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 }, 5282 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, 5283 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, 5284 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, 5285 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, 5286 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, 5287 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, 5288 { PCI_VDEVICE(INTEL, 
ICE_DEV_ID_E822C_BACKPLANE), 0 }, 5289 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, 5290 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, 5291 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, 5292 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, 5293 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, 5294 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, 5295 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, 5296 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, 5297 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, 5298 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, 5299 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, 5300 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, 5301 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, 5302 /* required last entry */ 5303 { 0, } 5304 }; 5305 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 5306 5307 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); 5308 5309 static const struct pci_error_handlers ice_pci_err_handler = { 5310 .error_detected = ice_pci_err_detected, 5311 .slot_reset = ice_pci_err_slot_reset, 5312 .reset_prepare = ice_pci_err_reset_prepare, 5313 .reset_done = ice_pci_err_reset_done, 5314 .resume = ice_pci_err_resume 5315 }; 5316 5317 static struct pci_driver ice_driver = { 5318 .name = KBUILD_MODNAME, 5319 .id_table = ice_pci_tbl, 5320 .probe = ice_probe, 5321 .remove = ice_remove, 5322 #ifdef CONFIG_PM 5323 .driver.pm = &ice_pm_ops, 5324 #endif /* CONFIG_PM */ 5325 .shutdown = ice_shutdown, 5326 .sriov_configure = ice_sriov_configure, 5327 .err_handler = &ice_pci_err_handler 5328 }; 5329 5330 /** 5331 * ice_module_init - Driver registration routine 5332 * 5333 * ice_module_init is the first routine called when the driver is 5334 * loaded. All it does is register with the PCI subsystem. 5335 */ 5336 static int __init ice_module_init(void) 5337 { 5338 int status; 5339 5340 pr_info("%s\n", ice_driver_string); 5341 pr_info("%s\n", ice_copyright); 5342 5343 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 5344 if (!ice_wq) { 5345 pr_err("Failed to create workqueue\n"); 5346 return -ENOMEM; 5347 } 5348 5349 status = pci_register_driver(&ice_driver); 5350 if (status) { 5351 pr_err("failed to register PCI driver, err %d\n", status); 5352 destroy_workqueue(ice_wq); 5353 } 5354 5355 return status; 5356 } 5357 module_init(ice_module_init); 5358 5359 /** 5360 * ice_module_exit - Driver exit cleanup routine 5361 * 5362 * ice_module_exit is called just before the driver is removed 5363 * from memory. 
5364 */ 5365 static void __exit ice_module_exit(void) 5366 { 5367 pci_unregister_driver(&ice_driver); 5368 destroy_workqueue(ice_wq); 5369 pr_info("module unloaded\n"); 5370 } 5371 module_exit(ice_module_exit); 5372 5373 /** 5374 * ice_set_mac_address - NDO callback to set MAC address 5375 * @netdev: network interface device structure 5376 * @pi: pointer to an address structure 5377 * 5378 * Returns 0 on success, negative on failure 5379 */ 5380 static int ice_set_mac_address(struct net_device *netdev, void *pi) 5381 { 5382 struct ice_netdev_priv *np = netdev_priv(netdev); 5383 struct ice_vsi *vsi = np->vsi; 5384 struct ice_pf *pf = vsi->back; 5385 struct ice_hw *hw = &pf->hw; 5386 struct sockaddr *addr = pi; 5387 u8 old_mac[ETH_ALEN]; 5388 u8 flags = 0; 5389 u8 *mac; 5390 int err; 5391 5392 mac = (u8 *)addr->sa_data; 5393 5394 if (!is_valid_ether_addr(mac)) 5395 return -EADDRNOTAVAIL; 5396 5397 if (ether_addr_equal(netdev->dev_addr, mac)) { 5398 netdev_dbg(netdev, "already using mac %pM\n", mac); 5399 return 0; 5400 } 5401 5402 if (test_bit(ICE_DOWN, pf->state) || 5403 ice_is_reset_in_progress(pf->state)) { 5404 netdev_err(netdev, "can't set mac %pM. device not ready\n", 5405 mac); 5406 return -EBUSY; 5407 } 5408 5409 if (ice_chnl_dmac_fltr_cnt(pf)) { 5410 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", 5411 mac); 5412 return -EAGAIN; 5413 } 5414 5415 netif_addr_lock_bh(netdev); 5416 ether_addr_copy(old_mac, netdev->dev_addr); 5417 /* change the netdev's MAC address */ 5418 eth_hw_addr_set(netdev, mac); 5419 netif_addr_unlock_bh(netdev); 5420 5421 /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 5422 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); 5423 if (err && err != -ENOENT) { 5424 err = -EADDRNOTAVAIL; 5425 goto err_update_filters; 5426 } 5427 5428 /* Add filter for new MAC. If filter exists, return success */ 5429 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 5430 if (err == -EEXIST) 5431 /* Although this MAC filter is already present in hardware it's 5432 * possible in some cases (e.g. bonding) that dev_addr was 5433 * modified outside of the driver and needs to be restored back 5434 * to this value. 5435 */ 5436 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 5437 else if (err) 5438 /* error if the new filter addition failed */ 5439 err = -EADDRNOTAVAIL; 5440 5441 err_update_filters: 5442 if (err) { 5443 netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 5444 mac); 5445 netif_addr_lock_bh(netdev); 5446 eth_hw_addr_set(netdev, old_mac); 5447 netif_addr_unlock_bh(netdev); 5448 return err; 5449 } 5450 5451 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 5452 netdev->dev_addr); 5453 5454 /* write new MAC address to the firmware */ 5455 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 5456 err = ice_aq_manage_mac_write(hw, mac, flags, NULL); 5457 if (err) { 5458 netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %d\n",
5459 mac, err);
5460 }
5461 return 0;
5462 }
5463
5464 /**
5465 * ice_set_rx_mode - NDO callback to set the netdev filters
5466 * @netdev: network interface device structure
5467 */
5468 static void ice_set_rx_mode(struct net_device *netdev)
5469 {
5470 struct ice_netdev_priv *np = netdev_priv(netdev);
5471 struct ice_vsi *vsi = np->vsi;
5472
5473 if (!vsi)
5474 return;
5475
5476 /* Set the flags to synchronize filters;
5477 * ndo_set_rx_mode may be triggered even without a change in netdev
5478 * flags
5479 */
5480 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5481 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5482 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5483
5484 /* schedule our worker thread which will take care of
5485 * applying the new filter changes
5486 */
5487 ice_service_task_schedule(vsi->back);
5488 }
5489
5490 /**
5491 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5492 * @netdev: network interface device structure
5493 * @queue_index: Queue ID
5494 * @maxrate: maximum bandwidth in Mbps
5495 */
5496 static int
5497 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5498 {
5499 struct ice_netdev_priv *np = netdev_priv(netdev);
5500 struct ice_vsi *vsi = np->vsi;
5501 u16 q_handle;
5502 int status;
5503 u8 tc;
5504
5505 /* Validate maxrate requested is within permitted range */
5506 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5507 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5508 maxrate, queue_index);
5509 return -EINVAL;
5510 }
5511
5512 q_handle = vsi->tx_rings[queue_index]->q_handle;
5513 tc = ice_dcb_get_tc(vsi, queue_index);
5514
5515 /* Set BW back to default, when user set maxrate to 0 */
5516 if (!maxrate)
5517 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5518 q_handle, ICE_MAX_BW);
5519 else
5520 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5521 q_handle, ICE_MAX_BW, maxrate * 1000);
5522 if (status)
5523 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5524 status);
5525
5526 return status;
5527 }
5528
5529 /**
5530 * ice_fdb_add - add an entry to the hardware database
5531 * @ndm: the input from the stack
5532 * @tb: pointer to array of nladdr (unused)
5533 * @dev: the net device pointer
5534 * @addr: the MAC address entry being added
5535 * @vid: VLAN ID
5536 * @flags: instructions from stack about fdb operation
5537 * @extack: netlink extended ack
5538 */
5539 static int
5540 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5541 struct net_device *dev, const unsigned char *addr, u16 vid,
5542 u16 flags, struct netlink_ext_ack __always_unused *extack)
5543 {
5544 int err;
5545
5546 if (vid) {
5547 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5548 return -EINVAL;
5549 }
5550 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5551 netdev_err(dev, "FDB only supports static addresses\n");
5552 return -EINVAL;
5553 }
5554
5555 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5556 err = dev_uc_add_excl(dev, addr);
5557 else if (is_multicast_ether_addr(addr))
5558 err = dev_mc_add_excl(dev, addr);
5559 else
5560 err = -EINVAL;
5561
5562 /* Only return duplicate errors if NLM_F_EXCL is set */
5563 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5564 err = 0;
5565
5566 return err;
5567 }
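/* Editor's note: ice_fdb_add() above and ice_fdb_del() below back the
 * bridge(8) FDB commands on the 'self' (device) path, roughly:
 *
 *	bridge fdb add 01:00:5e:00:00:42 dev eth0 self	-> ice_fdb_add()
 *	bridge fdb del 01:00:5e:00:00:42 dev eth0 self	-> ice_fdb_del()
 *
 * (Interface name and address are made up for illustration.) Per the checks
 * in ice_fdb_add(), entries must be static (NUD_PERMANENT) and may not
 * carry a VLAN ID.
 */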
5568
5569 /**
5570 * ice_fdb_del - delete an entry from the hardware database
5571 * @ndm: the input from the stack
5572 * @tb: pointer to array of nladdr (unused)
5573 * @dev: the net device pointer
5574 * @addr: the MAC address entry being removed
5575 * @vid: VLAN ID
5576 */
5577 static int
5578 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5579 struct net_device *dev, const unsigned char *addr,
5580 __always_unused u16 vid)
5581 {
5582 int err;
5583
5584 if (ndm->ndm_state & NUD_PERMANENT) {
5585 netdev_err(dev, "FDB only supports static addresses\n");
5586 return -EINVAL;
5587 }
5588
5589 if (is_unicast_ether_addr(addr))
5590 err = dev_uc_del(dev, addr);
5591 else if (is_multicast_ether_addr(addr))
5592 err = dev_mc_del(dev, addr);
5593 else
5594 err = -EINVAL;
5595
5596 return err;
5597 }
5598
5599 /**
5600 * ice_set_features - set the netdev feature flags
5601 * @netdev: ptr to the netdev being adjusted
5602 * @features: the feature set that the stack is suggesting
5603 */
5604 static int
5605 ice_set_features(struct net_device *netdev, netdev_features_t features)
5606 {
5607 struct ice_netdev_priv *np = netdev_priv(netdev);
5608 struct ice_vsi *vsi = np->vsi;
5609 struct ice_pf *pf = vsi->back;
5610 int ret = 0;
5611
5612 /* Don't set any netdev advanced features with device in Safe Mode */
5613 if (ice_is_safe_mode(vsi->back)) {
5614 dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5615 return ret;
5616 }
5617
5618 /* Do not change setting during reset */
5619 if (ice_is_reset_in_progress(pf->state)) {
5620 dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5621 return -EBUSY;
5622 }
5623
5624 /* Multiple features can be changed in one call so keep features in
5625 * separate if/else statements to guarantee each feature is checked
5626 */
5627 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5628 ice_vsi_manage_rss_lut(vsi, true);
5629 else if (!(features & NETIF_F_RXHASH) &&
5630 netdev->features & NETIF_F_RXHASH)
5631 ice_vsi_manage_rss_lut(vsi, false);
5632
5633 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5634 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5635 ret = ice_vsi_manage_vlan_stripping(vsi, true);
5636 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5637 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5638 ret = ice_vsi_manage_vlan_stripping(vsi, false);
5639
5640 if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5641 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5642 ret = ice_vsi_manage_vlan_insertion(vsi);
5643 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5644 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5645 ret = ice_vsi_manage_vlan_insertion(vsi);
5646
5647 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5648 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5649 ret = ice_cfg_vlan_pruning(vsi, true);
5650 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5651 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5652 ret = ice_cfg_vlan_pruning(vsi, false);
5653
5654 if ((features & NETIF_F_NTUPLE) &&
5655 !(netdev->features & NETIF_F_NTUPLE)) {
5656 ice_vsi_manage_fdir(vsi, true);
5657 ice_init_arfs(vsi);
5658 } else if (!(features & NETIF_F_NTUPLE) &&
5659 (netdev->features & NETIF_F_NTUPLE)) {
5660 ice_vsi_manage_fdir(vsi, false);
5661 ice_clear_arfs(vsi);
5662 }
5663
5664 /* don't turn off hw_tc_offload when ADQ is already enabled */
5665 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
5666 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
5667 return -EACCES;
5668 }
5669
5670 if ((features & NETIF_F_HW_TC) &&
5671 !(netdev->features & NETIF_F_HW_TC))
5672 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
5673 else
5674 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
5675
5676 return ret;
5677 }
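/* Editor's sketch: every branch in ice_set_features() above performs the
 * same "did this bit flip on?" comparison between the requested and the
 * current feature set. A hypothetical helper making that pattern explicit
 * (illustrative only; the driver intentionally keeps the checks inline):
 */
static bool __maybe_unused
ice_feature_turned_on(netdev_features_t cur, netdev_features_t req,
		      netdev_features_t bit)
{
	/* the requested set has the bit while the current set does not */
	return (req & bit) && !(cur & bit);
}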
NETIF_F_HW_TC) && 5671 !(netdev->features & NETIF_F_HW_TC)) 5672 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); 5673 else 5674 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); 5675 5676 return ret; 5677 } 5678 5679 /** 5680 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI 5681 * @vsi: VSI to setup VLAN properties for 5682 */ 5683 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 5684 { 5685 int ret = 0; 5686 5687 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 5688 ret = ice_vsi_manage_vlan_stripping(vsi, true); 5689 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 5690 ret = ice_vsi_manage_vlan_insertion(vsi); 5691 5692 return ret; 5693 } 5694 5695 /** 5696 * ice_vsi_cfg - Setup the VSI 5697 * @vsi: the VSI being configured 5698 * 5699 * Return 0 on success and negative value on error 5700 */ 5701 int ice_vsi_cfg(struct ice_vsi *vsi) 5702 { 5703 int err; 5704 5705 if (vsi->netdev) { 5706 ice_set_rx_mode(vsi->netdev); 5707 5708 err = ice_vsi_vlan_setup(vsi); 5709 5710 if (err) 5711 return err; 5712 } 5713 ice_vsi_cfg_dcb_rings(vsi); 5714 5715 err = ice_vsi_cfg_lan_txqs(vsi); 5716 if (!err && ice_is_xdp_ena_vsi(vsi)) 5717 err = ice_vsi_cfg_xdp_txqs(vsi); 5718 if (!err) 5719 err = ice_vsi_cfg_rxqs(vsi); 5720 5721 return err; 5722 } 5723 5724 /* THEORY OF MODERATION: 5725 * The ice driver hardware works differently than the hardware that DIMLIB was 5726 * originally made for. ice hardware doesn't have packet count limits that 5727 * can trigger an interrupt, but it *does* have interrupt rate limit support, 5728 * which is hard-coded to a limit of 250,000 ints/second. 5729 * If not using dynamic moderation, the INTRL value can be modified 5730 * by ethtool rx-usecs-high. 5731 */ 5732 struct ice_dim { 5733 /* the throttle rate for interrupts, basically worst case delay before 5734 * an initial interrupt fires, value is stored in microseconds. 5735 */ 5736 u16 itr; 5737 }; 5738 5739 /* Make a different profile for Rx that doesn't allow quite so aggressive 5740 * moderation at the high end (it maxes out at 126 us, or about 8k interrupts a 5741 * second).
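 * As a rough rule of thumb for the tables below: an ITR of N microseconds allows at most 10^6 / N interrupts per second, so {2} -> 500,000 ints/s (then capped at 250K by INTRL) and {126} -> ~7,936 ints/s.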
5742 */ 5743 static const struct ice_dim rx_profile[] = { 5744 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 5745 {8}, /* 125,000 ints/s */ 5746 {16}, /* 62,500 ints/s */ 5747 {62}, /* 16,129 ints/s */ 5748 {126} /* 7,936 ints/s */ 5749 }; 5750 5751 /* The transmit profile, which has the same sorts of values 5752 * as the previous struct 5753 */ 5754 static const struct ice_dim tx_profile[] = { 5755 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 5756 {8}, /* 125,000 ints/s */ 5757 {40}, /* 25,000 ints/s */ 5758 {128}, /* 7,812 ints/s */ 5759 {256} /* 3,906 ints/s */ 5760 }; 5761 5762 static void ice_tx_dim_work(struct work_struct *work) 5763 { 5764 struct ice_ring_container *rc; 5765 struct dim *dim; 5766 u16 itr; 5767 5768 dim = container_of(work, struct dim, work); 5769 rc = (struct ice_ring_container *)dim->priv; 5770 5771 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); 5772 5773 /* look up the values in our local table */ 5774 itr = tx_profile[dim->profile_ix].itr; 5775 5776 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); 5777 ice_write_itr(rc, itr); 5778 5779 dim->state = DIM_START_MEASURE; 5780 } 5781 5782 static void ice_rx_dim_work(struct work_struct *work) 5783 { 5784 struct ice_ring_container *rc; 5785 struct dim *dim; 5786 u16 itr; 5787 5788 dim = container_of(work, struct dim, work); 5789 rc = (struct ice_ring_container *)dim->priv; 5790 5791 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); 5792 5793 /* look up the values in our local table */ 5794 itr = rx_profile[dim->profile_ix].itr; 5795 5796 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); 5797 ice_write_itr(rc, itr); 5798 5799 dim->state = DIM_START_MEASURE; 5800 } 5801 5802 #define ICE_DIM_DEFAULT_PROFILE_IX 1 5803 5804 /** 5805 * ice_init_moderation - set up interrupt moderation 5806 * @q_vector: the vector containing rings to be configured 5807 * 5808 * Set up interrupt moderation registers, with the intent to do the right thing 5809 * whether called from reset or from probe, and whether or not dynamic 5810 * moderation is enabled. Take special care to write all the registers in both 5811 * the dynamic and the non-dynamic moderation case in order to make sure 5812 * hardware is in a known state. 5813 */ 5814 static void ice_init_moderation(struct ice_q_vector *q_vector) 5815 { 5816 struct ice_ring_container *rc; 5817 bool tx_dynamic, rx_dynamic; 5818 5819 rc = &q_vector->tx; 5820 INIT_WORK(&rc->dim.work, ice_tx_dim_work); 5821 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 5822 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 5823 rc->dim.priv = rc; 5824 tx_dynamic = ITR_IS_DYNAMIC(rc); 5825 5826 /* set the initial TX ITR to match the above */ 5827 ice_write_itr(rc, tx_dynamic ? 5828 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); 5829 5830 rc = &q_vector->rx; 5831 INIT_WORK(&rc->dim.work, ice_rx_dim_work); 5832 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 5833 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 5834 rc->dim.priv = rc; 5835 rx_dynamic = ITR_IS_DYNAMIC(rc); 5836 5837 /* set the initial RX ITR to match the above */ 5838 ice_write_itr(rc, rx_dynamic ?
rx_profile[rc->dim.profile_ix].itr : 5839 rc->itr_setting); 5840 5841 ice_set_q_vector_intrl(q_vector); 5842 } 5843 5844 /** 5845 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 5846 * @vsi: the VSI being configured 5847 */ 5848 static void ice_napi_enable_all(struct ice_vsi *vsi) 5849 { 5850 int q_idx; 5851 5852 if (!vsi->netdev) 5853 return; 5854 5855 ice_for_each_q_vector(vsi, q_idx) { 5856 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 5857 5858 ice_init_moderation(q_vector); 5859 5860 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 5861 napi_enable(&q_vector->napi); 5862 } 5863 } 5864 5865 /** 5866 * ice_up_complete - Finish the last steps of bringing up a connection 5867 * @vsi: The VSI being configured 5868 * 5869 * Return 0 on success and negative value on error 5870 */ 5871 static int ice_up_complete(struct ice_vsi *vsi) 5872 { 5873 struct ice_pf *pf = vsi->back; 5874 int err; 5875 5876 ice_vsi_cfg_msix(vsi); 5877 5878 /* Enable only Rx rings, Tx rings were enabled by the FW when the 5879 * Tx queue group list was configured and the context bits were 5880 * programmed using ice_vsi_cfg_txqs 5881 */ 5882 err = ice_vsi_start_all_rx_rings(vsi); 5883 if (err) 5884 return err; 5885 5886 clear_bit(ICE_VSI_DOWN, vsi->state); 5887 ice_napi_enable_all(vsi); 5888 ice_vsi_ena_irq(vsi); 5889 5890 if (vsi->port_info && 5891 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 5892 vsi->netdev) { 5893 ice_print_link_msg(vsi, true); 5894 netif_tx_start_all_queues(vsi->netdev); 5895 netif_carrier_on(vsi->netdev); 5896 if (!ice_is_e810(&pf->hw)) 5897 ice_ptp_link_change(pf, pf->hw.pf_id, true); 5898 } 5899 5900 /* clear this now, and the first stats read will be used as baseline */ 5901 vsi->stat_offsets_loaded = false; 5902 5903 ice_service_task_schedule(pf); 5904 5905 return 0; 5906 } 5907 5908 /** 5909 * ice_up - Bring the connection back up after being down 5910 * @vsi: VSI being configured 5911 */ 5912 int ice_up(struct ice_vsi *vsi) 5913 { 5914 int err; 5915 5916 err = ice_vsi_cfg(vsi); 5917 if (!err) 5918 err = ice_up_complete(vsi); 5919 5920 return err; 5921 } 5922 5923 /** 5924 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 5925 * @syncp: pointer to u64_stats_sync 5926 * @stats: stats that pkts and bytes count will be taken from 5927 * @pkts: packets stats counter 5928 * @bytes: bytes stats counter 5929 * 5930 * This function fetches stats from the ring considering the atomic operations 5931 * that need to be performed to read u64 values on a 32-bit machine.
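 * On 64-bit kernels the begin/retry pair typically compiles away; on 32-bit kernels it retries the snapshot whenever a writer updated the counters mid-read, so the caller always sees a consistent pkts/bytes pair.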
5932 */ 5933 static void 5934 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats, 5935 u64 *pkts, u64 *bytes) 5936 { 5937 unsigned int start; 5938 5939 do { 5940 start = u64_stats_fetch_begin_irq(syncp); 5941 *pkts = stats.pkts; 5942 *bytes = stats.bytes; 5943 } while (u64_stats_fetch_retry_irq(syncp, start)); 5944 } 5945 5946 /** 5947 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters 5948 * @vsi: the VSI to be updated 5949 * @vsi_stats: the stats struct to be updated 5950 * @rings: rings to work on 5951 * @count: number of rings 5952 */ 5953 static void 5954 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, 5955 struct rtnl_link_stats64 *vsi_stats, 5956 struct ice_tx_ring **rings, u16 count) 5957 { 5958 u16 i; 5959 5960 for (i = 0; i < count; i++) { 5961 struct ice_tx_ring *ring; 5962 u64 pkts = 0, bytes = 0; 5963 5964 ring = READ_ONCE(rings[i]); 5965 if (!ring) continue; /* skip unallocated rings; the tx_stats dereferences below would otherwise fault on a NULL ring */ 5966 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); 5967 vsi_stats->tx_packets += pkts; 5968 vsi_stats->tx_bytes += bytes; 5969 vsi->tx_restart += ring->tx_stats.restart_q; 5970 vsi->tx_busy += ring->tx_stats.tx_busy; 5971 vsi->tx_linearize += ring->tx_stats.tx_linearize; 5972 } 5973 } 5974 5975 /** 5976 * ice_update_vsi_ring_stats - Update VSI stats counters 5977 * @vsi: the VSI to be updated 5978 */ 5979 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 5980 { 5981 struct rtnl_link_stats64 *vsi_stats; 5982 u64 pkts, bytes; 5983 int i; 5984 5985 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); 5986 if (!vsi_stats) 5987 return; 5988 5989 /* reset non-netdev (extended) stats */ 5990 vsi->tx_restart = 0; 5991 vsi->tx_busy = 0; 5992 vsi->tx_linearize = 0; 5993 vsi->rx_buf_failed = 0; 5994 vsi->rx_page_failed = 0; 5995 5996 rcu_read_lock(); 5997 5998 /* update Tx rings counters */ 5999 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, 6000 vsi->num_txq); 6001 6002 /* update Rx rings counters */ 6003 ice_for_each_rxq(vsi, i) { 6004 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); 6005 6006 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); 6007 vsi_stats->rx_packets += pkts; 6008 vsi_stats->rx_bytes += bytes; 6009 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 6010 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; 6011 } 6012 6013 /* update XDP Tx rings counters */ 6014 if (ice_is_xdp_ena_vsi(vsi)) 6015 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, 6016 vsi->num_xdp_txq); 6017 6018 rcu_read_unlock(); 6019 6020 vsi->net_stats.tx_packets = vsi_stats->tx_packets; 6021 vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; 6022 vsi->net_stats.rx_packets = vsi_stats->rx_packets; 6023 vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; 6024 6025 kfree(vsi_stats); 6026 } 6027 6028 /** 6029 * ice_update_vsi_stats - Update VSI stats counters 6030 * @vsi: the VSI to be updated 6031 */ 6032 void ice_update_vsi_stats(struct ice_vsi *vsi) 6033 { 6034 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 6035 struct ice_eth_stats *cur_es = &vsi->eth_stats; 6036 struct ice_pf *pf = vsi->back; 6037 6038 if (test_bit(ICE_VSI_DOWN, vsi->state) || 6039 test_bit(ICE_CFG_BUSY, pf->state)) 6040 return; 6041 6042 /* get stats as recorded by Tx/Rx rings */ 6043 ice_update_vsi_ring_stats(vsi); 6044 6045 /* get VSI stats as recorded by the hardware */ 6046 ice_update_eth_stats(vsi); 6047 6048 cur_ns->tx_errors = cur_es->tx_errors; 6049 cur_ns->rx_dropped = cur_es->rx_discards; 6050 cur_ns->tx_dropped = cur_es->tx_discards;
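/* The remaining fields below come from the HW-maintained VSI counters read in ice_update_eth_stats() above, not from the ring sums. */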
6051 cur_ns->multicast = cur_es->rx_multicast; 6052 6053 /* update some more netdev stats if this is main VSI */ 6054 if (vsi->type == ICE_VSI_PF) { 6055 cur_ns->rx_crc_errors = pf->stats.crc_errors; 6056 cur_ns->rx_errors = pf->stats.crc_errors + 6057 pf->stats.illegal_bytes + 6058 pf->stats.rx_len_errors + 6059 pf->stats.rx_undersize + 6060 pf->hw_csum_rx_error + 6061 pf->stats.rx_jabber + 6062 pf->stats.rx_fragments + 6063 pf->stats.rx_oversize; 6064 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 6065 /* record drops from the port level */ 6066 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 6067 } 6068 } 6069 6070 /** 6071 * ice_update_pf_stats - Update PF port stats counters 6072 * @pf: PF whose stats need to be updated 6073 */ 6074 void ice_update_pf_stats(struct ice_pf *pf) 6075 { 6076 struct ice_hw_port_stats *prev_ps, *cur_ps; 6077 struct ice_hw *hw = &pf->hw; 6078 u16 fd_ctr_base; 6079 u8 port; 6080 6081 port = hw->port_info->lport; 6082 prev_ps = &pf->stats_prev; 6083 cur_ps = &pf->stats; 6084 6085 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 6086 &prev_ps->eth.rx_bytes, 6087 &cur_ps->eth.rx_bytes); 6088 6089 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 6090 &prev_ps->eth.rx_unicast, 6091 &cur_ps->eth.rx_unicast); 6092 6093 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 6094 &prev_ps->eth.rx_multicast, 6095 &cur_ps->eth.rx_multicast); 6096 6097 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 6098 &prev_ps->eth.rx_broadcast, 6099 &cur_ps->eth.rx_broadcast); 6100 6101 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 6102 &prev_ps->eth.rx_discards, 6103 &cur_ps->eth.rx_discards); 6104 6105 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 6106 &prev_ps->eth.tx_bytes, 6107 &cur_ps->eth.tx_bytes); 6108 6109 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 6110 &prev_ps->eth.tx_unicast, 6111 &cur_ps->eth.tx_unicast); 6112 6113 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 6114 &prev_ps->eth.tx_multicast, 6115 &cur_ps->eth.tx_multicast); 6116 6117 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 6118 &prev_ps->eth.tx_broadcast, 6119 &cur_ps->eth.tx_broadcast); 6120 6121 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 6122 &prev_ps->tx_dropped_link_down, 6123 &cur_ps->tx_dropped_link_down); 6124 6125 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 6126 &prev_ps->rx_size_64, &cur_ps->rx_size_64); 6127 6128 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 6129 &prev_ps->rx_size_127, &cur_ps->rx_size_127); 6130 6131 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 6132 &prev_ps->rx_size_255, &cur_ps->rx_size_255); 6133 6134 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 6135 &prev_ps->rx_size_511, &cur_ps->rx_size_511); 6136 6137 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 6138 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 6139 6140 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 6141 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 6142 6143 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 6144 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 6145 6146 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 6147 &prev_ps->tx_size_64, &cur_ps->tx_size_64); 6148 6149 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 6150 &prev_ps->tx_size_127, &cur_ps->tx_size_127); 6151
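/* ice_stat_update40()/ice_stat_update32() throughout this function read free-running 40-bit/32-bit HW counters and accumulate the wrap-aware delta since the previous snapshot into cur_ps; pf->stat_prev_loaded (set at the end of this function) makes the very first read act as the baseline. */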
ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 6153 &prev_ps->tx_size_255, &cur_ps->tx_size_255); 6154 6155 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 6156 &prev_ps->tx_size_511, &cur_ps->tx_size_511); 6157 6158 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 6159 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 6160 6161 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 6162 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 6163 6164 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 6165 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 6166 6167 fd_ctr_base = hw->fd_ctr_base; 6168 6169 ice_stat_update40(hw, 6170 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 6171 pf->stat_prev_loaded, &prev_ps->fd_sb_match, 6172 &cur_ps->fd_sb_match); 6173 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 6174 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 6175 6176 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 6177 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 6178 6179 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 6180 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 6181 6182 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 6183 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 6184 6185 ice_update_dcb_stats(pf); 6186 6187 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 6188 &prev_ps->crc_errors, &cur_ps->crc_errors); 6189 6190 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 6191 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 6192 6193 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 6194 &prev_ps->mac_local_faults, 6195 &cur_ps->mac_local_faults); 6196 6197 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 6198 &prev_ps->mac_remote_faults, 6199 &cur_ps->mac_remote_faults); 6200 6201 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 6202 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 6203 6204 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 6205 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 6206 6207 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 6208 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 6209 6210 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 6211 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 6212 6213 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 6214 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 6215 6216 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; 6217 6218 pf->stat_prev_loaded = true; 6219 } 6220 6221 /** 6222 * ice_get_stats64 - get statistics for network device structure 6223 * @netdev: network interface device structure 6224 * @stats: main device statistics structure 6225 */ 6226 static 6227 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 6228 { 6229 struct ice_netdev_priv *np = netdev_priv(netdev); 6230 struct rtnl_link_stats64 *vsi_stats; 6231 struct ice_vsi *vsi = np->vsi; 6232 6233 vsi_stats = &vsi->net_stats; 6234 6235 if (!vsi->num_txq || !vsi->num_rxq) 6236 return; 6237 6238 /* netdev packet/byte stats come from the ring counters. These are 6239 * obtained by summing up the per-ring counters (done by 6240 * ice_update_vsi_ring_stats). But only call the update routine and 6241 * read the registers if the VSI is not down.
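 * (They are refreshed by the watchdog/service task via ice_update_vsi_stats(), so at worst they lag by roughly one service-task interval.)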
6242 */ 6243 if (!test_bit(ICE_VSI_DOWN, vsi->state)) 6244 ice_update_vsi_ring_stats(vsi); 6245 stats->tx_packets = vsi_stats->tx_packets; 6246 stats->tx_bytes = vsi_stats->tx_bytes; 6247 stats->rx_packets = vsi_stats->rx_packets; 6248 stats->rx_bytes = vsi_stats->rx_bytes; 6249 6250 /* The rest of the stats can be read from the hardware but instead we 6251 * just return values that the watchdog task has already obtained from 6252 * the hardware. 6253 */ 6254 stats->multicast = vsi_stats->multicast; 6255 stats->tx_errors = vsi_stats->tx_errors; 6256 stats->tx_dropped = vsi_stats->tx_dropped; 6257 stats->rx_errors = vsi_stats->rx_errors; 6258 stats->rx_dropped = vsi_stats->rx_dropped; 6259 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 6260 stats->rx_length_errors = vsi_stats->rx_length_errors; 6261 } 6262 6263 /** 6264 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 6265 * @vsi: VSI having NAPI disabled 6266 */ 6267 static void ice_napi_disable_all(struct ice_vsi *vsi) 6268 { 6269 int q_idx; 6270 6271 if (!vsi->netdev) 6272 return; 6273 6274 ice_for_each_q_vector(vsi, q_idx) { 6275 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 6276 6277 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 6278 napi_disable(&q_vector->napi); 6279 6280 cancel_work_sync(&q_vector->tx.dim.work); 6281 cancel_work_sync(&q_vector->rx.dim.work); 6282 } 6283 } 6284 6285 /** 6286 * ice_down - Shutdown the connection 6287 * @vsi: The VSI being stopped 6288 * 6289 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit 6290 */ 6291 int ice_down(struct ice_vsi *vsi) 6292 { 6293 int i, tx_err, rx_err, link_err = 0; 6294 6295 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); 6296 6297 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 6298 if (!ice_is_e810(&vsi->back->hw)) 6299 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); 6300 netif_carrier_off(vsi->netdev); 6301 netif_tx_disable(vsi->netdev); 6302 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { 6303 ice_eswitch_stop_all_tx_queues(vsi->back); 6304 } 6305 6306 ice_vsi_dis_irq(vsi); 6307 6308 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 6309 if (tx_err) 6310 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", 6311 vsi->vsi_num, tx_err); 6312 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { 6313 tx_err = ice_vsi_stop_xdp_tx_rings(vsi); 6314 if (tx_err) 6315 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", 6316 vsi->vsi_num, tx_err); 6317 } 6318 6319 rx_err = ice_vsi_stop_all_rx_rings(vsi); 6320 if (rx_err) 6321 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", 6322 vsi->vsi_num, rx_err); 6323 6324 ice_napi_disable_all(vsi); 6325 6326 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { 6327 link_err = ice_force_phys_link_state(vsi, false); 6328 if (link_err) 6329 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", 6330 vsi->vsi_num, link_err); 6331 } 6332 6333 ice_for_each_txq(vsi, i) 6334 ice_clean_tx_ring(vsi->tx_rings[i]); 6335 6336 ice_for_each_rxq(vsi, i) 6337 ice_clean_rx_ring(vsi->rx_rings[i]); 6338 6339 if (tx_err || rx_err || link_err) { 6340 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 6341 vsi->vsi_num, vsi->vsw->sw_id); 6342 return -EIO; 6343 } 6344 6345 return 0; 6346 } 6347 6348 /** 6349 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 6350 * @vsi: VSI having resources allocated 6351 * 6352 * Return 0 on success, negative on failure 6353 */ 6354 int
ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 6355 { 6356 int i, err = 0; 6357 6358 if (!vsi->num_txq) { 6359 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 6360 vsi->vsi_num); 6361 return -EINVAL; 6362 } 6363 6364 ice_for_each_txq(vsi, i) { 6365 struct ice_tx_ring *ring = vsi->tx_rings[i]; 6366 6367 if (!ring) 6368 return -EINVAL; 6369 6370 if (vsi->netdev) 6371 ring->netdev = vsi->netdev; 6372 err = ice_setup_tx_ring(ring); 6373 if (err) 6374 break; 6375 } 6376 6377 return err; 6378 } 6379 6380 /** 6381 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 6382 * @vsi: VSI having resources allocated 6383 * 6384 * Return 0 on success, negative on failure 6385 */ 6386 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 6387 { 6388 int i, err = 0; 6389 6390 if (!vsi->num_rxq) { 6391 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 6392 vsi->vsi_num); 6393 return -EINVAL; 6394 } 6395 6396 ice_for_each_rxq(vsi, i) { 6397 struct ice_rx_ring *ring = vsi->rx_rings[i]; 6398 6399 if (!ring) 6400 return -EINVAL; 6401 6402 if (vsi->netdev) 6403 ring->netdev = vsi->netdev; 6404 err = ice_setup_rx_ring(ring); 6405 if (err) 6406 break; 6407 } 6408 6409 return err; 6410 } 6411 6412 /** 6413 * ice_vsi_open_ctrl - open control VSI for use 6414 * @vsi: the VSI to open 6415 * 6416 * Initialization of the Control VSI 6417 * 6418 * Returns 0 on success, negative value on error 6419 */ 6420 int ice_vsi_open_ctrl(struct ice_vsi *vsi) 6421 { 6422 char int_name[ICE_INT_NAME_STR_LEN]; 6423 struct ice_pf *pf = vsi->back; 6424 struct device *dev; 6425 int err; 6426 6427 dev = ice_pf_to_dev(pf); 6428 /* allocate descriptors */ 6429 err = ice_vsi_setup_tx_rings(vsi); 6430 if (err) 6431 goto err_setup_tx; 6432 6433 err = ice_vsi_setup_rx_rings(vsi); 6434 if (err) 6435 goto err_setup_rx; 6436 6437 err = ice_vsi_cfg(vsi); 6438 if (err) 6439 goto err_setup_rx; 6440 6441 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 6442 dev_driver_string(dev), dev_name(dev)); 6443 err = ice_vsi_req_irq_msix(vsi, int_name); 6444 if (err) 6445 goto err_setup_rx; 6446 6447 ice_vsi_cfg_msix(vsi); 6448 6449 err = ice_vsi_start_all_rx_rings(vsi); 6450 if (err) 6451 goto err_up_complete; 6452 6453 clear_bit(ICE_VSI_DOWN, vsi->state); 6454 ice_vsi_ena_irq(vsi); 6455 6456 return 0; 6457 6458 err_up_complete: 6459 ice_down(vsi); 6460 err_setup_rx: 6461 ice_vsi_free_rx_rings(vsi); 6462 err_setup_tx: 6463 ice_vsi_free_tx_rings(vsi); 6464 6465 return err; 6466 } 6467 6468 /** 6469 * ice_vsi_open - Called when a network interface is made active 6470 * @vsi: the VSI to open 6471 * 6472 * Initialization of the VSI 6473 * 6474 * Returns 0 on success, negative value on error 6475 */ 6476 int ice_vsi_open(struct ice_vsi *vsi) 6477 { 6478 char int_name[ICE_INT_NAME_STR_LEN]; 6479 struct ice_pf *pf = vsi->back; 6480 int err; 6481 6482 /* allocate descriptors */ 6483 err = ice_vsi_setup_tx_rings(vsi); 6484 if (err) 6485 goto err_setup_tx; 6486 6487 err = ice_vsi_setup_rx_rings(vsi); 6488 if (err) 6489 goto err_setup_rx; 6490 6491 err = ice_vsi_cfg(vsi); 6492 if (err) 6493 goto err_setup_rx; 6494 6495 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 6496 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 6497 err = ice_vsi_req_irq_msix(vsi, int_name); 6498 if (err) 6499 goto err_setup_rx; 6500 6501 if (vsi->type == ICE_VSI_PF) { 6502 /* Notify the stack of the actual queue counts. 
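 * netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues() clamp the queue range the stack may select to what this VSI actually allocated; without this the stack could pick a queue with no backing ring.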
*/ 6503 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 6504 if (err) 6505 goto err_set_qs; 6506 6507 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 6508 if (err) 6509 goto err_set_qs; 6510 } 6511 6512 err = ice_up_complete(vsi); 6513 if (err) 6514 goto err_up_complete; 6515 6516 return 0; 6517 6518 err_up_complete: 6519 ice_down(vsi); 6520 err_set_qs: 6521 ice_vsi_free_irq(vsi); 6522 err_setup_rx: 6523 ice_vsi_free_rx_rings(vsi); 6524 err_setup_tx: 6525 ice_vsi_free_tx_rings(vsi); 6526 6527 return err; 6528 } 6529 6530 /** 6531 * ice_vsi_release_all - Delete all VSIs 6532 * @pf: PF from which all VSIs are being removed 6533 */ 6534 static void ice_vsi_release_all(struct ice_pf *pf) 6535 { 6536 int err, i; 6537 6538 if (!pf->vsi) 6539 return; 6540 6541 ice_for_each_vsi(pf, i) { 6542 if (!pf->vsi[i]) 6543 continue; 6544 6545 if (pf->vsi[i]->type == ICE_VSI_CHNL) 6546 continue; 6547 6548 err = ice_vsi_release(pf->vsi[i]); 6549 if (err) 6550 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 6551 i, err, pf->vsi[i]->vsi_num); 6552 } 6553 } 6554 6555 /** 6556 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 6557 * @pf: pointer to the PF instance 6558 * @type: VSI type to rebuild 6559 * 6560 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 6561 */ 6562 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 6563 { 6564 struct device *dev = ice_pf_to_dev(pf); 6565 int i, err; 6566 6567 ice_for_each_vsi(pf, i) { 6568 struct ice_vsi *vsi = pf->vsi[i]; 6569 6570 if (!vsi || vsi->type != type) 6571 continue; 6572 6573 /* rebuild the VSI */ 6574 err = ice_vsi_rebuild(vsi, true); 6575 if (err) { 6576 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", 6577 err, vsi->idx, ice_vsi_type_str(type)); 6578 return err; 6579 } 6580 6581 /* replay filters for the VSI */ 6582 err = ice_replay_vsi(&pf->hw, vsi->idx); 6583 if (err) { 6584 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", 6585 err, vsi->idx, ice_vsi_type_str(type)); 6586 return err; 6587 } 6588 6589 /* Re-map HW VSI number, using VSI handle that has been 6590 * previously validated in ice_replay_vsi() call above 6591 */ 6592 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 6593 6594 /* enable the VSI */ 6595 err = ice_ena_vsi(vsi, false); 6596 if (err) { 6597 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", 6598 err, vsi->idx, ice_vsi_type_str(type)); 6599 return err; 6600 } 6601 6602 dev_info(dev, "VSI rebuilt. 
VSI index %d, type %s\n", vsi->idx, 6603 ice_vsi_type_str(type)); 6604 } 6605 6606 return 0; 6607 } 6608 6609 /** 6610 * ice_update_pf_netdev_link - Update PF netdev link status 6611 * @pf: pointer to the PF instance 6612 */ 6613 static void ice_update_pf_netdev_link(struct ice_pf *pf) 6614 { 6615 bool link_up; 6616 int i; 6617 6618 ice_for_each_vsi(pf, i) { 6619 struct ice_vsi *vsi = pf->vsi[i]; 6620 6621 if (!vsi || vsi->type != ICE_VSI_PF) 6622 return; 6623 6624 ice_get_link_status(pf->vsi[i]->port_info, &link_up); 6625 if (link_up) { 6626 netif_carrier_on(pf->vsi[i]->netdev); 6627 netif_tx_wake_all_queues(pf->vsi[i]->netdev); 6628 } else { 6629 netif_carrier_off(pf->vsi[i]->netdev); 6630 netif_tx_stop_all_queues(pf->vsi[i]->netdev); 6631 } 6632 } 6633 } 6634 6635 /** 6636 * ice_rebuild - rebuild after reset 6637 * @pf: PF to rebuild 6638 * @reset_type: type of reset 6639 * 6640 * Do not rebuild VF VSI in this flow because that is already handled via 6641 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a 6642 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want 6643 * to reset/rebuild all the VF VSIs twice. 6644 */ 6645 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 6646 { 6647 struct device *dev = ice_pf_to_dev(pf); 6648 struct ice_hw *hw = &pf->hw; 6649 int err; 6650 6651 if (test_bit(ICE_DOWN, pf->state)) 6652 goto clear_recovery; 6653 6654 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); 6655 6656 if (reset_type == ICE_RESET_EMPR) { 6657 /* If an EMP reset has occurred, any previously pending flash 6658 * update will have completed. We no longer know whether or 6659 * not the NVM update EMP reset is restricted. 6660 */ 6661 pf->fw_emp_reset_disabled = false; 6662 } 6663 6664 err = ice_init_all_ctrlq(hw); 6665 if (err) { 6666 dev_err(dev, "control queues init failed %d\n", err); 6667 goto err_init_ctrlq; 6668 } 6669 6670 /* if DDP was previously loaded successfully */ 6671 if (!ice_is_safe_mode(pf)) { 6672 /* reload the SW DB of filter tables */ 6673 if (reset_type == ICE_RESET_PFR) 6674 ice_fill_blk_tbls(hw); 6675 else 6676 /* Reload DDP Package after CORER/GLOBR reset */ 6677 ice_load_pkg(NULL, pf); 6678 } 6679 6680 err = ice_clear_pf_cfg(hw); 6681 if (err) { 6682 dev_err(dev, "clear PF configuration failed %d\n", err); 6683 goto err_init_ctrlq; 6684 } 6685 6686 if (pf->first_sw->dflt_vsi_ena) 6687 dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); 6688 /* clear the default VSI configuration if it exists */ 6689 pf->first_sw->dflt_vsi = NULL; 6690 pf->first_sw->dflt_vsi_ena = false; 6691 6692 ice_clear_pxe_mode(hw); 6693 6694 err = ice_init_nvm(hw); 6695 if (err) { 6696 dev_err(dev, "ice_init_nvm failed %d\n", err); 6697 goto err_init_ctrlq; 6698 } 6699 6700 err = ice_get_caps(hw); 6701 if (err) { 6702 dev_err(dev, "ice_get_caps failed %d\n", err); 6703 goto err_init_ctrlq; 6704 } 6705 6706 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 6707 if (err) { 6708 dev_err(dev, "set_mac_cfg failed %d\n", err); 6709 goto err_init_ctrlq; 6710 } 6711 6712 err = ice_sched_init_port(hw->port_info); 6713 if (err) 6714 goto err_sched_init_port; 6715 6716 /* start misc vector */ 6717 err = ice_req_irq_msix_misc(pf); 6718 if (err) { 6719 dev_err(dev, "misc vector setup failed: %d\n", err); 6720 goto err_sched_init_port; 6721 } 6722 6723 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 6724 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 6725 if (!rd32(hw,
PFQF_FD_SIZE)) { 6726 u16 unused, guar, b_effort; 6727 6728 guar = hw->func_caps.fd_fltr_guar; 6729 b_effort = hw->func_caps.fd_fltr_best_effort; 6730 6731 /* force guaranteed filter pool for PF */ 6732 ice_alloc_fd_guar_item(hw, &unused, guar); 6733 /* force shared filter pool for PF */ 6734 ice_alloc_fd_shrd_item(hw, &unused, b_effort); 6735 } 6736 } 6737 6738 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 6739 ice_dcb_rebuild(pf); 6740 6741 /* If the PF previously had enabled PTP, PTP init needs to happen before 6742 * the VSI rebuild. If not, this causes the PTP link status events to 6743 * fail. 6744 */ 6745 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 6746 ice_ptp_reset(pf); 6747 6748 /* rebuild PF VSI */ 6749 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 6750 if (err) { 6751 dev_err(dev, "PF VSI rebuild failed: %d\n", err); 6752 goto err_vsi_rebuild; 6753 } 6754 6755 /* configure PTP timestamping after VSI rebuild */ 6756 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 6757 ice_ptp_cfg_timestamp(pf, false); 6758 6759 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); 6760 if (err) { 6761 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); 6762 goto err_vsi_rebuild; 6763 } 6764 6765 if (reset_type == ICE_RESET_PFR) { 6766 err = ice_rebuild_channels(pf); 6767 if (err) { 6768 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", 6769 err); 6770 goto err_vsi_rebuild; 6771 } 6772 } 6773 6774 /* If Flow Director is active */ 6775 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 6776 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 6777 if (err) { 6778 dev_err(dev, "control VSI rebuild failed: %d\n", err); 6779 goto err_vsi_rebuild; 6780 } 6781 6782 /* replay HW Flow Director recipes */ 6783 if (hw->fdir_prof) 6784 ice_fdir_replay_flows(hw); 6785 6786 /* replay Flow Director filters */ 6787 ice_fdir_replay_fltrs(pf); 6788 6789 ice_rebuild_arfs(pf); 6790 } 6791 6792 ice_update_pf_netdev_link(pf); 6793 6794 /* tell the firmware we are up */ 6795 err = ice_send_version(pf); 6796 if (err) { 6797 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", 6798 err); 6799 goto err_vsi_rebuild; 6800 } 6801 6802 ice_replay_post(hw); 6803 6804 /* if we get here, reset flow is successful */ 6805 clear_bit(ICE_RESET_FAILED, pf->state); 6806 6807 ice_plug_aux_dev(pf); 6808 return; 6809 6810 err_vsi_rebuild: 6811 err_sched_init_port: 6812 ice_sched_cleanup_all(hw); 6813 err_init_ctrlq: 6814 ice_shutdown_all_ctrlq(hw); 6815 set_bit(ICE_RESET_FAILED, pf->state); 6816 clear_recovery: 6817 /* set this bit in PF state to control service task scheduling */ 6818 set_bit(ICE_NEEDS_RESTART, pf->state); 6819 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 6820 } 6821 6822 /** 6823 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP 6824 * @vsi: Pointer to VSI structure 6825 */ 6826 static int ice_max_xdp_frame_size(struct ice_vsi *vsi) 6827 { 6828 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) 6829 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; 6830 else 6831 return ICE_RXBUF_3072; 6832 } 6833 6834 /** 6835 * ice_change_mtu - NDO callback to change the MTU 6836 * @netdev: network interface device structure 6837 * @new_mtu: new value for maximum frame size 6838 * 6839 * Returns 0 on success, negative on failure 6840 */ 6841 static int ice_change_mtu(struct net_device *netdev, int new_mtu) 6842 { 6843 struct ice_netdev_priv *np = netdev_priv(netdev); 6844 struct ice_vsi *vsi = np->vsi; 6845 struct ice_pf *pf = 
vsi->back; 6846 u8 count = 0; 6847 int err = 0; 6848 6849 if (new_mtu == (int)netdev->mtu) { 6850 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 6851 return 0; 6852 } 6853 6854 if (ice_is_xdp_ena_vsi(vsi)) { 6855 int frame_size = ice_max_xdp_frame_size(vsi); 6856 6857 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 6858 netdev_err(netdev, "max MTU for XDP usage is %d\n", 6859 frame_size - ICE_ETH_PKT_HDR_PAD); 6860 return -EINVAL; 6861 } 6862 } 6863 6864 /* if a reset is in progress, wait for some time for it to complete */ 6865 do { 6866 if (ice_is_reset_in_progress(pf->state)) { 6867 count++; 6868 usleep_range(1000, 2000); 6869 } else { 6870 break; 6871 } 6872 6873 } while (count < 100); 6874 6875 if (count == 100) { 6876 netdev_err(netdev, "can't change MTU. Device is busy\n"); 6877 return -EBUSY; 6878 } 6879 6880 netdev->mtu = (unsigned int)new_mtu; 6881 6882 /* if VSI is up, bring it down and then back up */ 6883 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 6884 err = ice_down(vsi); 6885 if (err) { 6886 netdev_err(netdev, "change MTU if_down err %d\n", err); 6887 return err; 6888 } 6889 6890 err = ice_up(vsi); 6891 if (err) { 6892 netdev_err(netdev, "change MTU if_up err %d\n", err); 6893 return err; 6894 } 6895 } 6896 6897 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); 6898 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); 6899 6900 return err; 6901 } 6902 6903 /** 6904 * ice_eth_ioctl - Access the hwtstamp interface 6905 * @netdev: network interface device structure 6906 * @ifr: interface request data 6907 * @cmd: ioctl command 6908 */ 6909 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6910 { 6911 struct ice_netdev_priv *np = netdev_priv(netdev); 6912 struct ice_pf *pf = np->vsi->back; 6913 6914 switch (cmd) { 6915 case SIOCGHWTSTAMP: 6916 return ice_ptp_get_ts_config(pf, ifr); 6917 case SIOCSHWTSTAMP: 6918 return ice_ptp_set_ts_config(pf, ifr); 6919 default: 6920 return -EOPNOTSUPP; 6921 } 6922 } 6923 6924 /** 6925 * ice_aq_str - convert AQ err code to a string 6926 * @aq_err: the AQ error code to convert 6927 */ 6928 const char *ice_aq_str(enum ice_aq_err aq_err) 6929 { 6930 switch (aq_err) { 6931 case ICE_AQ_RC_OK: 6932 return "OK"; 6933 case ICE_AQ_RC_EPERM: 6934 return "ICE_AQ_RC_EPERM"; 6935 case ICE_AQ_RC_ENOENT: 6936 return "ICE_AQ_RC_ENOENT"; 6937 case ICE_AQ_RC_ENOMEM: 6938 return "ICE_AQ_RC_ENOMEM"; 6939 case ICE_AQ_RC_EBUSY: 6940 return "ICE_AQ_RC_EBUSY"; 6941 case ICE_AQ_RC_EEXIST: 6942 return "ICE_AQ_RC_EEXIST"; 6943 case ICE_AQ_RC_EINVAL: 6944 return "ICE_AQ_RC_EINVAL"; 6945 case ICE_AQ_RC_ENOSPC: 6946 return "ICE_AQ_RC_ENOSPC"; 6947 case ICE_AQ_RC_ENOSYS: 6948 return "ICE_AQ_RC_ENOSYS"; 6949 case ICE_AQ_RC_EMODE: 6950 return "ICE_AQ_RC_EMODE"; 6951 case ICE_AQ_RC_ENOSEC: 6952 return "ICE_AQ_RC_ENOSEC"; 6953 case ICE_AQ_RC_EBADSIG: 6954 return "ICE_AQ_RC_EBADSIG"; 6955 case ICE_AQ_RC_ESVN: 6956 return "ICE_AQ_RC_ESVN"; 6957 case ICE_AQ_RC_EBADMAN: 6958 return "ICE_AQ_RC_EBADMAN"; 6959 case ICE_AQ_RC_EBADBUF: 6960 return "ICE_AQ_RC_EBADBUF"; 6961 } 6962 6963 return "ICE_AQ_RC_UNKNOWN"; 6964 } 6965 6966 /** 6967 * ice_set_rss_lut - Set RSS LUT 6968 * @vsi: Pointer to VSI structure 6969 * @lut: Lookup table 6970 * @lut_size: Lookup table size 6971 * 6972 * Returns 0 on success, negative on failure 6973 */ 6974 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 6975 { 6976 struct ice_aq_get_set_rss_lut_params params = {}; 6977 struct ice_hw *hw = &vsi->back->hw; 6978 int status; 6979 6980 if (!lut) 6981 return 
-EINVAL; 6982 6983 params.vsi_handle = vsi->idx; 6984 params.lut_size = lut_size; 6985 params.lut_type = vsi->rss_lut_type; 6986 params.lut = lut; 6987 6988 status = ice_aq_set_rss_lut(hw, &params); 6989 if (status) 6990 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", 6991 status, ice_aq_str(hw->adminq.sq_last_status)); 6992 6993 return status; 6994 } 6995 6996 /** 6997 * ice_set_rss_key - Set RSS key 6998 * @vsi: Pointer to the VSI structure 6999 * @seed: RSS hash seed 7000 * 7001 * Returns 0 on success, negative on failure 7002 */ 7003 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) 7004 { 7005 struct ice_hw *hw = &vsi->back->hw; 7006 int status; 7007 7008 if (!seed) 7009 return -EINVAL; 7010 7011 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 7012 if (status) 7013 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", 7014 status, ice_aq_str(hw->adminq.sq_last_status)); 7015 7016 return status; 7017 } 7018 7019 /** 7020 * ice_get_rss_lut - Get RSS LUT 7021 * @vsi: Pointer to VSI structure 7022 * @lut: Buffer to store the lookup table entries 7023 * @lut_size: Size of buffer to store the lookup table entries 7024 * 7025 * Returns 0 on success, negative on failure 7026 */ 7027 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 7028 { 7029 struct ice_aq_get_set_rss_lut_params params = {}; 7030 struct ice_hw *hw = &vsi->back->hw; 7031 int status; 7032 7033 if (!lut) 7034 return -EINVAL; 7035 7036 params.vsi_handle = vsi->idx; 7037 params.lut_size = lut_size; 7038 params.lut_type = vsi->rss_lut_type; 7039 params.lut = lut; 7040 7041 status = ice_aq_get_rss_lut(hw, &params); 7042 if (status) 7043 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", 7044 status, ice_aq_str(hw->adminq.sq_last_status)); 7045 7046 return status; 7047 } 7048 7049 /** 7050 * ice_get_rss_key - Get RSS key 7051 * @vsi: Pointer to VSI structure 7052 * @seed: Buffer to store the key in 7053 * 7054 * Returns 0 on success, negative on failure 7055 */ 7056 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) 7057 { 7058 struct ice_hw *hw = &vsi->back->hw; 7059 int status; 7060 7061 if (!seed) 7062 return -EINVAL; 7063 7064 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 7065 if (status) 7066 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", 7067 status, ice_aq_str(hw->adminq.sq_last_status)); 7068 7069 return status; 7070 } 7071 7072 /** 7073 * ice_bridge_getlink - Get the hardware bridge mode 7074 * @skb: skb buff 7075 * @pid: process ID 7076 * @seq: RTNL message seq 7077 * @dev: the netdev being configured 7078 * @filter_mask: filter mask passed in 7079 * @nlflags: netlink flags passed in 7080 * 7081 * Return the bridge mode (VEB/VEPA) 7082 */ 7083 static int 7084 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7085 struct net_device *dev, u32 filter_mask, int nlflags) 7086 { 7087 struct ice_netdev_priv *np = netdev_priv(dev); 7088 struct ice_vsi *vsi = np->vsi; 7089 struct ice_pf *pf = vsi->back; 7090 u16 bmode; 7091 7092 bmode = pf->first_sw->bridge_mode; 7093 7094 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 7095 filter_mask, NULL); 7096 } 7097 7098 /** 7099 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 7100 * @vsi: Pointer to VSI structure 7101 * @bmode: Hardware bridge mode (VEB/VEPA) 7102 * 7103 * Returns 0 on success, negative on failure 7104 */ 7105 static int
ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 7106 { 7107 struct ice_aqc_vsi_props *vsi_props; 7108 struct ice_hw *hw = &vsi->back->hw; 7109 struct ice_vsi_ctx *ctxt; 7110 int ret; 7111 7112 vsi_props = &vsi->info; 7113 7114 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 7115 if (!ctxt) 7116 return -ENOMEM; 7117 7118 ctxt->info = vsi->info; 7119 7120 if (bmode == BRIDGE_MODE_VEB) 7121 /* change from VEPA to VEB mode */ 7122 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 7123 else 7124 /* change from VEB to VEPA mode */ 7125 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 7126 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 7127 7128 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 7129 if (ret) { 7130 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", 7131 bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); 7132 goto out; 7133 } 7134 /* Update sw flags for bookkeeping */ 7135 vsi_props->sw_flags = ctxt->info.sw_flags; 7136 7137 out: 7138 kfree(ctxt); 7139 return ret; 7140 } 7141 7142 /** 7143 * ice_bridge_setlink - Set the hardware bridge mode 7144 * @dev: the netdev being configured 7145 * @nlh: RTNL message 7146 * @flags: bridge setlink flags 7147 * @extack: netlink extended ack 7148 * 7149 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is 7150 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if 7151 * not already set) for all VSIs connected to this switch, and also updates the 7152 * unicast switch filter rules for the corresponding switch of the netdev. 7153 */ 7154 static int 7155 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 7156 u16 __always_unused flags, 7157 struct netlink_ext_ack __always_unused *extack) 7158 { 7159 struct ice_netdev_priv *np = netdev_priv(dev); 7160 struct ice_pf *pf = np->vsi->back; 7161 struct nlattr *attr, *br_spec; 7162 struct ice_hw *hw = &pf->hw; 7163 struct ice_sw *pf_sw; 7164 int rem, v, err = 0; 7165 7166 pf_sw = pf->first_sw; 7167 /* find the attribute in the netlink message */ 7168 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7169 7170 nla_for_each_nested(attr, br_spec, rem) { 7171 __u16 mode; 7172 7173 if (nla_type(attr) != IFLA_BRIDGE_MODE) 7174 continue; 7175 mode = nla_get_u16(attr); 7176 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 7177 return -EINVAL; 7178 /* Continue if bridge mode is not being flipped */ 7179 if (mode == pf_sw->bridge_mode) 7180 continue; 7181 /* Iterate through the PF VSI list and update the loopback 7182 * mode of each VSI 7183 */ 7184 ice_for_each_vsi(pf, v) { 7185 if (!pf->vsi[v]) 7186 continue; 7187 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 7188 if (err) 7189 return err; 7190 } 7191 7192 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 7193 /* Update the unicast switch filter rules for the corresponding 7194 * switch of the netdev 7195 */ 7196 err = ice_update_sw_rule_bridge_mode(hw); 7197 if (err) { 7198 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", 7199 mode, err, 7200 ice_aq_str(hw->adminq.sq_last_status)); 7201 /* revert hw->evb_veb */ 7202 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 7203 return err; 7204 } 7205 7206 pf_sw->bridge_mode = mode; 7207 } 7208 7209 return 0; 7210 } 7211 7212 /** 7213 * ice_tx_timeout - Respond to a Tx Hang 7214 * @netdev: network interface device structure 7215 * @txqueue: Tx queue 7216 */ 7217 static void ice_tx_timeout(struct net_device *netdev,
unsigned int txqueue) 7218 { 7219 struct ice_netdev_priv *np = netdev_priv(netdev); 7220 struct ice_tx_ring *tx_ring = NULL; 7221 struct ice_vsi *vsi = np->vsi; 7222 struct ice_pf *pf = vsi->back; 7223 u32 i; 7224 7225 pf->tx_timeout_count++; 7226 7227 /* Check if PFC is enabled for the TC to which the queue belongs. 7228 * If so, the Tx timeout is not caused by a hung queue and there is no 7229 * need to reset and rebuild 7230 */ 7231 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { 7232 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", 7233 txqueue); 7234 return; 7235 } 7236 7237 /* now that we have an index, find the tx_ring struct */ 7238 ice_for_each_txq(vsi, i) 7239 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 7240 if (txqueue == vsi->tx_rings[i]->q_index) { 7241 tx_ring = vsi->tx_rings[i]; 7242 break; 7243 } 7244 7245 /* Reset recovery level if enough time has elapsed after last timeout. 7246 * Also ensure no new reset action happens before next timeout period. 7247 */ 7248 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 7249 pf->tx_timeout_recovery_level = 1; 7250 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 7251 netdev->watchdog_timeo))) 7252 return; 7253 7254 if (tx_ring) { 7255 struct ice_hw *hw = &pf->hw; 7256 u32 head, val = 0; 7257 7258 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & 7259 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; 7260 /* Read interrupt register */ 7261 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); 7262 7263 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 7264 vsi->vsi_num, txqueue, tx_ring->next_to_clean, 7265 head, tx_ring->next_to_use, val); 7266 } 7267 7268 pf->tx_timeout_last_recovery = jiffies; 7269 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", 7270 pf->tx_timeout_recovery_level, txqueue); 7271 7272 switch (pf->tx_timeout_recovery_level) { 7273 case 1: 7274 set_bit(ICE_PFR_REQ, pf->state); 7275 break; 7276 case 2: 7277 set_bit(ICE_CORER_REQ, pf->state); 7278 break; 7279 case 3: 7280 set_bit(ICE_GLOBR_REQ, pf->state); 7281 break; 7282 default: 7283 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 7284 set_bit(ICE_DOWN, pf->state); 7285 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 7286 set_bit(ICE_SERVICE_DIS, pf->state); 7287 break; 7288 } 7289 7290 ice_service_task_schedule(pf); 7291 pf->tx_timeout_recovery_level++; 7292 } 7293 7294 /** 7295 * ice_setup_tc_cls_flower - flower classifier offloads 7296 * @np: net device to configure 7297 * @filter_dev: device on which filter is added 7298 * @cls_flower: offload data 7299 */ 7300 static int 7301 ice_setup_tc_cls_flower(struct ice_netdev_priv *np, 7302 struct net_device *filter_dev, 7303 struct flow_cls_offload *cls_flower) 7304 { 7305 struct ice_vsi *vsi = np->vsi; 7306 7307 if (cls_flower->common.chain_index) 7308 return -EOPNOTSUPP; 7309 7310 switch (cls_flower->command) { 7311 case FLOW_CLS_REPLACE: 7312 return ice_add_cls_flower(filter_dev, vsi, cls_flower); 7313 case FLOW_CLS_DESTROY: 7314 return ice_del_cls_flower(vsi, cls_flower); 7315 default: 7316 return -EINVAL; 7317 } 7318 } 7319 7320 /** 7321 * ice_setup_tc_block_cb - callback handler registered for TC block 7322 * @type: TC SETUP type 7323 * @type_data: TC flower offload data that contains user input 7324 * @cb_priv: netdev private data 7325 */ 7326 static int 7327 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv) 7328 { 7329 struct ice_netdev_priv *np = cb_priv; 7330 7331 switch (type) { 7332 case TC_SETUP_CLSFLOWER: 7333 return ice_setup_tc_cls_flower(np, np->vsi->netdev, 7334 type_data); 7335 default: 7336 return -EOPNOTSUPP; 7337 } 7338 } 7339 7340 /** 7341 * ice_validate_mqprio_qopt - Validate TCF input parameters 7342 * @vsi: Pointer to VSI 7343 * @mqprio_qopt: input parameters for mqprio queue configuration 7344 * 7345 * This function validates MQPRIO params, such as qcount (power of 2 wherever 7346 * needed), and makes sure the user doesn't specify a qcount or BW rate limit 7347 * for more TCs than "num_tc" 7348 */ 7349 static int 7350 ice_validate_mqprio_qopt(struct ice_vsi *vsi, 7351 struct tc_mqprio_qopt_offload *mqprio_qopt) 7352 { 7353 u64 sum_max_rate = 0, sum_min_rate = 0; 7354 int non_power_of_2_qcount = 0; 7355 struct ice_pf *pf = vsi->back; 7356 int max_rss_q_cnt = 0; 7357 struct device *dev; 7358 int i, speed; 7359 u8 num_tc; 7360 7361 if (vsi->type != ICE_VSI_PF) 7362 return -EINVAL; 7363 7364 if (mqprio_qopt->qopt.offset[0] != 0 || 7365 mqprio_qopt->qopt.num_tc < 1 || 7366 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC) 7367 return -EINVAL; 7368 7369 dev = ice_pf_to_dev(pf); 7370 vsi->ch_rss_size = 0; 7371 num_tc = mqprio_qopt->qopt.num_tc; 7372 7373 for (i = 0; num_tc; i++) { 7374 int qcount = mqprio_qopt->qopt.count[i]; 7375 u64 max_rate, min_rate, rem; 7376 7377 if (!qcount) 7378 return -EINVAL; 7379 7380 if (is_power_of_2(qcount)) { 7381 if (non_power_of_2_qcount && 7382 qcount > non_power_of_2_qcount) { 7383 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n", 7384 qcount, non_power_of_2_qcount); 7385 return -EINVAL; 7386 } 7387 if (qcount > max_rss_q_cnt) 7388 max_rss_q_cnt = qcount; 7389 } else { 7390 if (non_power_of_2_qcount && 7391 qcount != non_power_of_2_qcount) { 7392 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n", 7393 qcount, non_power_of_2_qcount); 7394 return -EINVAL; 7395 } 7396 if (qcount < max_rss_q_cnt) { 7397 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n", 7398 qcount, max_rss_q_cnt); 7399 return -EINVAL; 7400 } 7401 max_rss_q_cnt = qcount; 7402 non_power_of_2_qcount = qcount; 7403 } 7404 7405 /* TC command takes input in K/M/Gbps or K/M/Gbit etc. but 7406 * converts the bandwidth rate limit into Bytes/s when 7407 * passing it down to the driver. So convert input bandwidth 7408 * from Bytes/s to Kbps (e.g. a 100 Mbit request arrives as 12,500,000 Bytes/s and becomes 100,000 Kbps here) 7409 */ 7410 max_rate = mqprio_qopt->max_rate[i]; 7411 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR); 7412 sum_max_rate += max_rate; 7413 7414 /* min_rate is minimum guaranteed rate and it can't be zero */ 7415 min_rate = mqprio_qopt->min_rate[i]; 7416 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR); 7417 sum_min_rate += min_rate; 7418 7419 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) { 7420 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i, 7421 min_rate, ICE_MIN_BW_LIMIT); 7422 return -EINVAL; 7423 } 7424 7425 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem); 7426 if (rem) { 7427 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps", 7428 i, ICE_MIN_BW_LIMIT); 7429 return -EINVAL; 7430 } 7431 7432 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem); 7433 if (rem) { 7434 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps", 7435 i, ICE_MIN_BW_LIMIT); 7436 return -EINVAL; 7437 } 7438 7439 /* min_rate can't be more than max_rate, except when max_rate 7440 * is zero (implies max_rate sought is max line rate). In such 7441 * a case min_rate can be more than max.
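 * (For instance, with illustrative numbers: a min_rate equivalent to 500,000 Kbps together with max_rate 0 is accepted and simply means "guarantee ~500 Mbps, cap at line rate".)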
7442 */ 7443 if (max_rate && min_rate > max_rate) { 7444 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n", 7445 min_rate, max_rate); 7446 return -EINVAL; 7447 } 7448 7449 if (i >= mqprio_qopt->qopt.num_tc - 1) 7450 break; 7451 if (mqprio_qopt->qopt.offset[i + 1] != 7452 (mqprio_qopt->qopt.offset[i] + qcount)) 7453 return -EINVAL; 7454 } 7455 if (vsi->num_rxq < 7456 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) 7457 return -EINVAL; 7458 if (vsi->num_txq < 7459 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) 7460 return -EINVAL; 7461 7462 speed = ice_get_link_speed_kbps(vsi); 7463 if (sum_max_rate && sum_max_rate > (u64)speed) { 7464 dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n", 7465 sum_max_rate, speed); 7466 return -EINVAL; 7467 } 7468 if (sum_min_rate && sum_min_rate > (u64)speed) { 7469 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n", 7470 sum_min_rate, speed); 7471 return -EINVAL; 7472 } 7473 7474 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */ 7475 vsi->ch_rss_size = max_rss_q_cnt; 7476 7477 return 0; 7478 } 7479 7480 /** 7481 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF 7482 * @pf: ptr to PF device 7483 * @vsi: ptr to VSI 7484 */ 7485 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) 7486 { 7487 struct device *dev = ice_pf_to_dev(pf); 7488 bool added = false; 7489 struct ice_hw *hw; 7490 int flow; 7491 7492 if (!(vsi->num_gfltr || vsi->num_bfltr)) 7493 return -EINVAL; 7494 7495 hw = &pf->hw; 7496 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { 7497 struct ice_fd_hw_prof *prof; 7498 int tun, status; 7499 u64 entry_h; 7500 7501 if (!(hw->fdir_prof && hw->fdir_prof[flow] && 7502 hw->fdir_prof[flow]->cnt)) 7503 continue; 7504 7505 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { 7506 enum ice_flow_priority prio; 7507 u64 prof_id; 7508 7509 /* add this VSI to FDir profile for this flow */ 7510 prio = ICE_FLOW_PRIO_NORMAL; 7511 prof = hw->fdir_prof[flow]; 7512 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; 7513 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, 7514 prof->vsi_h[0], vsi->idx, 7515 prio, prof->fdir_seg[tun], 7516 &entry_h); 7517 if (status) { 7518 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n", 7519 vsi->idx, flow); 7520 continue; 7521 } 7522 7523 prof->entry_h[prof->cnt][tun] = entry_h; 7524 } 7525 7526 /* store VSI for filter replay and delete */ 7527 prof->vsi_h[prof->cnt] = vsi->idx; 7528 prof->cnt++; 7529 7530 added = true; 7531 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, 7532 flow); 7533 } 7534 7535 if (!added) 7536 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); 7537 7538 return 0; 7539 } 7540 7541 /** 7542 * ice_add_channel - add a channel by adding VSI 7543 * @pf: ptr to PF device 7544 * @sw_id: underlying HW switching element ID 7545 * @ch: ptr to channel structure 7546 * 7547 * Add a channel (VSI) using add_vsi and queue_map 7548 */ 7549 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) 7550 { 7551 struct device *dev = ice_pf_to_dev(pf); 7552 struct ice_vsi *vsi; 7553 7554 if (ch->type != ICE_VSI_CHNL) { 7555 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type); 7556 return -EINVAL; 7557 } 7558 7559 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); 7560 if (!vsi || vsi->type != ICE_VSI_CHNL) { 7561 dev_err(dev, "create chnl VSI failure\n"); 7562 return -EINVAL; 7563 } 7564 7565 
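/* Attach the new channel VSI to any Flow Director profiles the PF has already built so side-band filters can target the channel's queues; ice_add_vsi_to_fdir() logs and skips per-flow failures rather than failing channel creation. */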
ice_add_vsi_to_fdir(pf, vsi); 7566 7567 ch->sw_id = sw_id; 7568 ch->vsi_num = vsi->vsi_num; 7569 ch->info.mapping_flags = vsi->info.mapping_flags; 7570 ch->ch_vsi = vsi; 7571 /* set the back pointer of channel for newly created VSI */ 7572 vsi->ch = ch; 7573 7574 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, 7575 sizeof(vsi->info.q_mapping)); 7576 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, 7577 sizeof(vsi->info.tc_mapping)); 7578 7579 return 0; 7580 } 7581 7582 /** 7583 * ice_chnl_cfg_res - configure channel resources 7584 * @vsi: the VSI being setup 7585 * @ch: ptr to channel structure 7586 * 7587 * Configure channel specific resources such as rings and vectors. 7588 */ 7589 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) 7590 { 7591 int i; 7592 7593 for (i = 0; i < ch->num_txq; i++) { 7594 struct ice_q_vector *tx_q_vector, *rx_q_vector; 7595 struct ice_ring_container *rc; 7596 struct ice_tx_ring *tx_ring; 7597 struct ice_rx_ring *rx_ring; 7598 7599 tx_ring = vsi->tx_rings[ch->base_q + i]; 7600 rx_ring = vsi->rx_rings[ch->base_q + i]; 7601 if (!tx_ring || !rx_ring) 7602 continue; 7603 7604 /* setup ring being channel enabled */ 7605 tx_ring->ch = ch; 7606 rx_ring->ch = ch; 7607 7608 /* following code block sets up vector specific attributes */ 7609 tx_q_vector = tx_ring->q_vector; 7610 rx_q_vector = rx_ring->q_vector; 7611 if (!tx_q_vector && !rx_q_vector) 7612 continue; 7613 7614 if (tx_q_vector) { 7615 tx_q_vector->ch = ch; 7616 /* setup Tx and Rx ITR setting if DIM is off */ 7617 rc = &tx_q_vector->tx; 7618 if (!ITR_IS_DYNAMIC(rc)) 7619 ice_write_itr(rc, rc->itr_setting); 7620 } 7621 if (rx_q_vector) { 7622 rx_q_vector->ch = ch; 7623 /* setup Tx and Rx ITR setting if DIM is off */ 7624 rc = &rx_q_vector->rx; 7625 if (!ITR_IS_DYNAMIC(rc)) 7626 ice_write_itr(rc, rc->itr_setting); 7627 } 7628 } 7629 7630 /* it is safe to assume that, if the channel has non-zero num_txq/num_rxq, 7631 * then the GLINT_ITR register would have been written to perform an 7632 * in-context update, hence perform a flush 7633 */ 7634 if (ch->num_txq || ch->num_rxq) 7635 ice_flush(&vsi->back->hw); 7636 } 7637 7638 /** 7639 * ice_cfg_chnl_all_res - configure channel resources 7640 * @vsi: ptr to main_vsi 7641 * @ch: ptr to channel structure 7642 * 7643 * This function configures channel specific resources such as flow-director 7644 * counter index, and other resources such as queues, vectors, ITR settings 7645 */ 7646 static void 7647 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) 7648 { 7649 /* configure channel (aka ADQ) resources such as queues, vectors, 7650 * ITR settings for channel specific vectors and anything else 7651 */ 7652 ice_chnl_cfg_res(vsi, ch); 7653 } 7654 7655 /** 7656 * ice_setup_hw_channel - setup new channel 7657 * @pf: ptr to PF device 7658 * @vsi: the VSI being setup 7659 * @ch: ptr to channel structure 7660 * @sw_id: underlying HW switching element ID 7661 * @type: type of channel to be created (VMDq2/VF) 7662 * 7663 * Setup new channel (VSI) based on specified type (VMDq2/VF) 7664 * and configures Tx rings accordingly 7665 */ 7666 static int 7667 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, 7668 struct ice_channel *ch, u16 sw_id, u8 type) 7669 { 7670 struct device *dev = ice_pf_to_dev(pf); 7671 int ret; 7672 7673 ch->base_q = vsi->next_base_q; 7674 ch->type = type; 7675 7676 ret = ice_add_channel(pf, sw_id, ch); 7677 if (ret) { 7678 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id); 7679 return ret; 7680 } 7681 7682 /* configure/setup ADQ specific resources */
/**
 * ice_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @sw_id: underlying HW switching element ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures Tx rings accordingly
 */
static int
ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_channel *ch, u16 sw_id, u8 type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret;

	ch->base_q = vsi->next_base_q;
	ch->type = type;

	ret = ice_add_channel(pf, sw_id, ch);
	if (ret) {
		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
		return ret;
	}

	/* configure/setup ADQ specific resources */
	ice_cfg_chnl_all_res(vsi, ch);

	/* make sure to update the next_base_q so that subsequent channel's
	 * (aka ADQ) VSI queue map is correct
	 */
	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
		ch->num_rxq);

	return 0;
}

/**
 * ice_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element
 */
static bool
ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		  struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	u16 sw_id;
	int ret;

	if (vsi->type != ICE_VSI_PF) {
		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
		return false;
	}

	sw_id = pf->first_sw->sw_id;

	/* create channel (VSI) */
	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
	if (ret) {
		dev_err(dev, "failed to setup hw_channel\n");
		return false;
	}
	dev_dbg(dev, "successfully created channel\n");

	return ch->ch_vsi ? true : false;
}

/**
 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
 */
static int
ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
{
	int err;

	err = ice_set_min_bw_limit(vsi, min_tx_rate);
	if (err)
		return err;

	return ice_set_max_bw_limit(vsi, max_tx_rate);
}

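/* Illustrative origin of the rate parameters above: with mqprio channel
 * mode, per-TC shaper values come from user space, e.g. (interface name and
 * rates are examples; syntax per tc-mqprio(8)):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 queues 2@0 2@2 \
 *       hw 1 mode channel shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * Those rates arrive in vsi->mqprio_qopt and are applied per channel VSI
 * through ice_set_bw_limit().
 */
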
/**
 * ice_create_q_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates a channel (VSI) using the num_queues specified by
 * the user and reconfigures RSS if needed.
 */
static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	if (!ch)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	if (!ch->num_txq || !ch->num_rxq) {
		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
		return -EINVAL;
	}

	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_txq);
		return -EINVAL;
	}

	if (!ice_setup_channel(pf, vsi, ch)) {
		dev_info(dev, "Failed to setup channel\n");
		return -EINVAL;
	}
	/* configure BW rate limit */
	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
		int ret;

		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (ret)
			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
		else
			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
	}

	vsi->cnt_q_avail -= ch->num_txq;

	return 0;
}

/**
 * ice_rem_all_chnl_fltrs - removes all channel filters
 * @pf: ptr to PF; TC-flower based filters are tracked at PF level
 *
 * Remove all advanced switch filters only if they are channel specific
 * tc-flower based filters
 */
static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	/* to remove all channel filters, iterate an ordered list of filters */
	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		struct ice_rule_query_data rule;
		int status;

		/* for now process only channel specific filters */
		if (!ice_is_chnl_fltr(fltr))
			continue;

		rule.rid = fltr->rid;
		rule.rule_id = fltr->rule_id;
		rule.vsi_handle = fltr->dest_id;
		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
		if (status) {
			if (status == -ENOENT)
				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
					rule.rule_id);
			else
				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
					status);
		} else if (fltr->dest_vsi) {
			/* update advanced switch filter count */
			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
				u32 flags = fltr->flags;

				fltr->dest_vsi->num_chnl_fltr--;
				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
					pf->num_dmac_chnl_fltrs--;
			}
		}

		hlist_del(&fltr->tc_flower_node);
		kfree(fltr);
	}
}

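/* Illustrative example of a channel-specific filter that the teardown above
 * removes; such filters are added from user space with tc-flower's hw_tc
 * option (device name and addresses are examples; syntax per tc-flower(8)):
 *
 *   tc filter add dev eth0 protocol ip ingress flower skip_sw \
 *       dst_ip 10.0.0.1 ip_proto tcp dst_port 5201 hw_tc 1
 *
 * hw_tc 1 steers matching flows to the queue set of traffic class 1, i.e.
 * to a channel VSI, and the filter is tracked in pf->tc_flower_fltr_list.
 */
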
/**
 * ice_remove_q_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 * @rem_fltr: delete advanced switch filter or not
 *
 * Remove queue channels for the TCs
 */
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
{
	struct ice_channel *ch, *ch_tmp;
	struct ice_pf *pf = vsi->back;
	int i;

	/* remove all tc-flower based filters if they are channel filters */
	if (rem_fltr)
		ice_rem_all_chnl_fltrs(pf);

	/* remove ntuple filters since queue configuration is being changed */
	if (vsi->netdev->features & NETIF_F_NTUPLE) {
		struct ice_hw *hw = &pf->hw;

		mutex_lock(&hw->fdir_fltr_lock);
		ice_fdir_del_all_fltrs(vsi);
		mutex_unlock(&hw->fdir_fltr_lock);
	}

	/* perform cleanup for channels if they exist */
	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		list_del(&ch->list);
		ch_vsi = ch->ch_vsi;
		if (!ch_vsi) {
			kfree(ch);
			continue;
		}

		/* Reset queue contexts */
		for (i = 0; i < ch->num_rxq; i++) {
			struct ice_tx_ring *tx_ring;
			struct ice_rx_ring *rx_ring;

			tx_ring = vsi->tx_rings[ch->base_q + i];
			rx_ring = vsi->rx_rings[ch->base_q + i];
			if (tx_ring) {
				tx_ring->ch = NULL;
				if (tx_ring->q_vector)
					tx_ring->q_vector->ch = NULL;
			}
			if (rx_ring) {
				rx_ring->ch = NULL;
				if (rx_ring->q_vector)
					rx_ring->q_vector->ch = NULL;
			}
		}

		/* Release FD resources for the channel VSI */
		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);

		/* clear the VSI from scheduler tree */
		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);

		/* Delete VSI from FW */
		ice_vsi_delete(ch->ch_vsi);

		/* Delete VSI from PF and HW VSI arrays */
		ice_vsi_clear(ch->ch_vsi);

		/* free the channel */
		kfree(ch);
	}

	/* clear the channel VSI map which is stored in main VSI */
	ice_for_each_chnl_tc(i)
		vsi->tc_map_vsi[i] = NULL;

	/* reset main VSI's all TC information */
	vsi->all_enatc = 0;
	vsi->all_numtc = 0;
}

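/* The rebuild path below is invoked from the PF reset/rebuild flow (see the
 * forward declaration of ice_rebuild_channels() near the top of this file)
 * and is only relevant when ADQ (ICE_FLAG_TC_MQPRIO) was active before the
 * reset.
 */
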
/**
 * ice_rebuild_channels - rebuild channel
 * @pf: ptr to PF
 *
 * Recreate channel VSIs and replay filters
 */
static int ice_rebuild_channels(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi;
	bool rem_adv_fltr = true;
	struct ice_channel *ch;
	struct ice_vsi *vsi;
	int tc_idx = 1;
	int i, err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return 0;

	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
	    main_vsi->old_numtc == 1)
		return 0; /* nothing to be done */

	/* reconfigure main VSI based on old value of TC and cached values
	 * for MQPRIO opts
	 */
	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
	if (err) {
		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
			main_vsi->old_ena_tc, main_vsi->vsi_num);
		return err;
	}

	/* rebuild ADQ VSIs */
	ice_for_each_vsi(pf, i) {
		enum ice_vsi_type type;

		vsi = pf->vsi[i];
		if (!vsi || vsi->type != ICE_VSI_CHNL)
			continue;

		type = vsi->type;

		/* rebuild ADQ VSI */
		err = ice_vsi_rebuild(vsi, true);
		if (err) {
			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
				ice_vsi_type_str(type), vsi->idx, err);
			goto cleanup;
		}

		/* Re-map HW VSI number, using the VSI handle that has been
		 * previously validated in the ice_vsi_rebuild() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
				ice_vsi_type_str(type), err, vsi->idx);
			rem_adv_fltr = false;
			goto cleanup;
		}
		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
			 ice_vsi_type_str(type), vsi->idx);

		/* store ADQ VSI at correct TC index in main VSI's
		 * map of TC to VSI
		 */
		main_vsi->tc_map_vsi[tc_idx++] = vsi;
	}

	/* ADQ VSI(s) have been rebuilt successfully, so setup
	 * channel for main VSI's Tx and Rx rings
	 */
	list_for_each_entry(ch, &main_vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		ch_vsi = ch->ch_vsi;
		if (!ch_vsi)
			continue;

		/* reconfig channel resources */
		ice_cfg_chnl_all_res(main_vsi, ch);

		/* replay BW rate limit if it is non-zero */
		if (!ch->max_tx_rate && !ch->min_tx_rate)
			continue;

		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (err)
			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				err, ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
		else
			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
	}

	/* reconfig RSS for main VSI */
	if (main_vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(main_vsi);

	return 0;

cleanup:
	ice_remove_q_channels(main_vsi, rem_adv_fltr);
	return err;
}

/**
 * ice_create_q_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 */
static int ice_create_q_channels(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_channel *ch;
	int ret = 0, i;

	ice_for_each_chnl_tc(i) {
		if (!(vsi->all_enatc & BIT(i)))
			continue;

		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			ret = -ENOMEM;
			goto err_free;
		}
		INIT_LIST_HEAD(&ch->list);
		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];

		/* convert to Kbits/s */
		if (ch->max_tx_rate)
			ch->max_tx_rate = div_u64(ch->max_tx_rate,
						  ICE_BW_KBPS_DIVISOR);
		if (ch->min_tx_rate)
			ch->min_tx_rate = div_u64(ch->min_tx_rate,
						  ICE_BW_KBPS_DIVISOR);

		ret = ice_create_q_channel(vsi, ch);
		if (ret) {
			dev_err(ice_pf_to_dev(pf),
				"failed creating channel TC:%d\n", i);
			kfree(ch);
			goto err_free;
		}
		list_add_tail(&ch->list, &vsi->ch_list);
		vsi->tc_map_vsi[i] = ch->ch_vsi;
		dev_dbg(ice_pf_to_dev(pf),
			"successfully created channel: VSI %pK\n", ch->ch_vsi);
	}
	return 0;

err_free:
	ice_remove_q_channels(vsi, false);

	return ret;
}

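/* Illustrative user-space trigger for the mqprio handler below (interface
 * name is an example; syntax per tc-mqprio(8)):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *       queues 4@0 4@4 hw 1 mode channel
 *
 * "mode channel" selects TC_MQPRIO_MODE_CHANNEL, the only hardware-offload
 * mode ice_setup_tc_mqprio_qdisc() accepts; deleting the qdisc lands here
 * again with qopt.hw == 0 and tears the channels down.
 */
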
/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 */
static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 mode, ena_tc_qdisc = 0;
	int cur_txq, cur_rxq;
	u8 hw = 0, num_tcf;
	struct device *dev;
	int ret, i;

	dev = ice_pf_to_dev(pf);
	num_tcf = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		vsi->ch_rss_size = 0;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tcf;
	}

	/* Generate queue region map for number of TCF requested */
	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= BIT(i);

	switch (mode) {
	case TC_MQPRIO_MODE_CHANNEL:

		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret) {
			netdev_err(netdev, "failed to validate mqprio qopt, ret %d\n",
				   ret);
			return ret;
		}
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload
		 * is already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
		break;
	default:
		return -EINVAL;
	}

config_tcf:

	/* Requesting same TCF configuration as already enabled */
	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Pause VSI queues */
	ice_dis_vsi(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ice_remove_q_channels(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
				     num_online_cpus());
		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
				     num_online_cpus());
	} else {
		/* logic to rebuild VSI, same as ethtool -L */
		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;

		for (i = 0; i < num_tcf; i++) {
			if (!(ena_tc_qdisc & BIT(i)))
				continue;

			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
		vsi->req_txq = offset + qcount_tx;
		vsi->req_rxq = offset + qcount_rx;

		/* store away original rss_size info, so that it gets reused
		 * from ice_vsi_rebuild during tc-qdisc delete stage - to
		 * determine what the rss_size for the main VSI should be
		 */
		vsi->orig_rss_size = vsi->rss_size;
	}

	/* save current values of Tx and Rx queues before calling VSI rebuild
	 * for fallback option
	 */
	cur_txq = vsi->num_txq;
	cur_rxq = vsi->num_rxq;

	/* proceed with rebuild main VSI using correct number of queues */
	ret = ice_vsi_rebuild(vsi, false);
	if (ret) {
		/* fallback to current number of queues */
		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
		vsi->req_txq = cur_txq;
		vsi->req_rxq = cur_rxq;
		clear_bit(ICE_RESET_FAILED, pf->state);
		if (ice_vsi_rebuild(vsi, false)) {
			dev_err(dev, "Rebuild of main VSI failed again\n");
			return ret;
		}
	}

	vsi->all_numtc = num_tcf;
	vsi->all_enatc = ena_tc_qdisc;
	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
	if (ret) {
		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
			   vsi->vsi_num);
		goto exit;
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

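		/* Assumption documented for clarity: the mqprio offload
		 * carries min_rate/max_rate as u64 byte/s values, while
		 * ice_set_bw_limit() expects Kbit/s; ICE_BW_KBPS_DIVISOR
		 * performs that conversion below.
		 */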
		/* set TC0 rate limit if specified */
		if (max_tx_rate || min_tx_rate) {
			/* convert to Kbits/s */
			if (max_tx_rate)
				max_tx_rate = div_u64(max_tx_rate,
						      ICE_BW_KBPS_DIVISOR);
			if (min_tx_rate)
				min_tx_rate = div_u64(min_tx_rate,
						      ICE_BW_KBPS_DIVISOR);

			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
			if (!ret) {
				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
			} else {
				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
				goto exit;
			}
		}
		ret = ice_create_q_channels(vsi);
		if (ret) {
			netdev_err(netdev, "failed configuring queue channels\n");
			goto exit;
		} else {
			netdev_dbg(netdev, "successfully configured channels\n");
		}
	}

	if (vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(vsi);

exit:
	/* if error, reset the all_numtc and all_enatc */
	if (ret) {
		vsi->all_numtc = 0;
		vsi->all_enatc = 0;
	}
	/* resume VSI */
	ice_ena_vsi(vsi, true);

	return ret;
}

static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

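/* The functions below implement indirect TC block offload: flower filters
 * installed on netdevs this driver does not register (supported tunnel
 * devices, or VLANs stacked on the ice uplink) can still be offloaded to
 * this PF. Illustrative example (device names assumed):
 *
 *   tc filter add dev vxlan0 ingress protocol ip flower \
 *       dst_ip 10.0.0.2 ip_proto udp action drop
 *
 * may reach ice via ice_indr_setup_tc_cb() rather than ndo_setup_tc.
 */
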
static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb = flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						    indr_priv, indr_priv,
						    ice_rep_indr_tc_block_unbind,
						    f, netdev, sch, data, np,
						    cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}

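/* For reference: ice_open() above is reached through the regular ndo_open
 * path, e.g. "ip link set dev eth0 up" (interface name assumed), while
 * ice_open_internal() below is also entered from the reset handling code,
 * which is why only ice_open() carries the reset-in-progress guard.
 */
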
/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be called directly except
 * from ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	ice_vsi_close(vsi);

	return 0;
}

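/* A practical consequence of ice_features_check() below: a frame whose
 * header offsets or lengths are odd or exceed the ICE_TXD_*LEN_MAX limits
 * is not dropped; it merely loses checksum/GSO offload for this transmit
 * and falls back to software processing in the stack.
 */
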
/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is, then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};