// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
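/* Worked example (illustrative numbers only, not driver state): with
 * ring->count = 64, next_to_clean = 60 and next_to_use = 4, head > tail,
 * so the pending count wraps around the ring: 4 + 64 - 60 = 8 descriptors
 * are still outstanding.
 */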
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
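/* Note on how these callbacks are wired up: the driver does not walk the
 * netdev address lists itself. ice_vsi_sync_fltr() below hands both
 * helpers to the core sync machinery, roughly:
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *
 * and the core invokes them once per address that needs adding or
 * removing, under the netdev addr_list_lock.
 */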
/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
	else
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
	return status;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
	else
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
	return status;
}
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (ice_vsi_has_non_zero_vlans(vsi))
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_set_promisc(vsi, promisc_m);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (ice_vsi_has_non_zero_vlans(vsi))
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_clear_promisc(vsi, promisc_m);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->current_netdev_flags &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}
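/* Retry behavior sketch: the subtask clears ICE_FLAG_FLTR_SYNC up front
 * and only re-sets it when a VSI sync fails, so a transient AdminQ error
 * simply defers the remaining work to the next service-task pass rather
 * than looping here.
 */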
/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents this from happening and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_qs_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
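/* Teardown ordering recap (as read from the function above): VFs are
 * quiesced before any hardware state is torn down, queues and scheduler
 * state go next, and the control queues are shut down last, after every
 * step that may still need to post AdminQ commands.
 * ICE_PREPARED_FOR_RESET is set only once the whole sequence completes.
 */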
/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf, true);
	}
}
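/* Flow summary: a PFR is driver-initiated and fully handled here,
 * including the rebuild. CORER/GLOBR/EMPR completions instead arrive as
 * an OICR interrupt, so for those the rebuild happens later in
 * ice_reset_subtask() once ICE_RESET_OICR_RECV is observed.
 */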
/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}
	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}
/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
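/* Resulting MIB layout sketch (assuming the TLV sizes defined in the DCB
 * headers): three back-to-back IEEE 802.1Qaz org TLVs are packed into
 * lldpmib, and 'offset' accumulates each TLV's 2-byte header plus payload
 * length, so it ends up as the total LLDPDU length handed to
 * ice_aq_set_lldp_mib():
 *
 *	[ETS CFG][ETS REC][PFC CFG]
 */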
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}
/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (!ice_is_e810(&pf->hw))
		ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}
enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
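/* Illustrative caller sketch (hedged; the firmware flash-update path is
 * one real user of this API): after posting an AdminQ command, a thread
 * can block until the matching completion is pulled off the ARQ:
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event);
 *	if (err)
 *		return err;	(-ETIMEDOUT, -ECANCELED, or a signal)
 *
 * Because event.msg_buf is left NULL here, only the descriptor is copied
 * back, per the rules documented above.
 */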
/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}
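/* Wake-up protocol note: both completion and cancellation follow the same
 * pattern - update task->state under aq_wait_lock, then wake the shared
 * aq_wait_queue. Each waiter re-reads its own task->state in
 * ice_aq_wait_for_event() and maps it to 0 or -ECANCELED, so a single
 * broadcast wake_up() suffices and spurious wakeups are harmless.
 */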
/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
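/* The helper above compares the hardware head pointer against the
 * driver's next_to_clean; any difference means firmware has posted
 * descriptors the driver has not yet consumed. The subtasks below use it
 * to re-check for work after clearing their EVENT_PENDING bits, closing
 * the window in which a newly arrived message could otherwise be missed.
 */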
/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}
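/* Scheduling handshake note: ICE_SERVICE_SCHED acts as a single-slot
 * "already queued" latch. test_and_set_bit() in the schedule path and the
 * smp_mb__before_atomic()/clear_bit() pair here ensure that state updates
 * made by the completed task pass are visible before another CPU can win
 * the race to queue the next pass.
 */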
/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g. WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
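/* Periodic-work pattern recap: the timer callback runs in softirq
 * context, so it does nothing but re-arm itself and queue pf->serv_task
 * on ice_wq; the subtasks above then execute in process context, where
 * they are free to sleep on AdminQ commands and mutexes.
 */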
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 vf->vf_id);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				mutex_lock(&vf->cfg_lock);
				ice_reset_vf(vf, false);
				mutex_unlock(&vf->cfg_lock);
			}
		}
	}
	mutex_unlock(&pf->vfs.table_lock);

	ice_print_vfs_mdd_events(pf);
}
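/* Register handling note (as read from the code above): each *_MDET_*
 * register latches one event and is acknowledged by writing all-ones
 * back (0xffffffff for the global GL_MDET_* registers, 0xFFFF for the
 * per-PF/per-VF ones), re-arming it for the next detection. The handler
 * therefore drains every source on each pass rather than stopping at the
 * first hit.
 */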
1872 */ 1873 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); 1874 if (!cfg) { 1875 retcode = -ENOMEM; 1876 goto out; 1877 } 1878 1879 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1880 if (link_up) 1881 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 1882 else 1883 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 1884 1885 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 1886 if (retcode) { 1887 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 1888 vsi->vsi_num, retcode); 1889 retcode = -EIO; 1890 } 1891 1892 kfree(cfg); 1893 out: 1894 kfree(pcaps); 1895 return retcode; 1896 } 1897 1898 /** 1899 * ice_init_nvm_phy_type - Initialize the NVM PHY type 1900 * @pi: port info structure 1901 * 1902 * Initialize nvm_phy_type_[low|high] for link lenient mode support 1903 */ 1904 static int ice_init_nvm_phy_type(struct ice_port_info *pi) 1905 { 1906 struct ice_aqc_get_phy_caps_data *pcaps; 1907 struct ice_pf *pf = pi->hw->back; 1908 int err; 1909 1910 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1911 if (!pcaps) 1912 return -ENOMEM; 1913 1914 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, 1915 pcaps, NULL); 1916 1917 if (err) { 1918 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 1919 goto out; 1920 } 1921 1922 pf->nvm_phy_type_hi = pcaps->phy_type_high; 1923 pf->nvm_phy_type_lo = pcaps->phy_type_low; 1924 1925 out: 1926 kfree(pcaps); 1927 return err; 1928 } 1929 1930 /** 1931 * ice_init_link_dflt_override - Initialize link default override 1932 * @pi: port info structure 1933 * 1934 * Initialize link default override and PHY total port shutdown during probe 1935 */ 1936 static void ice_init_link_dflt_override(struct ice_port_info *pi) 1937 { 1938 struct ice_link_default_override_tlv *ldo; 1939 struct ice_pf *pf = pi->hw->back; 1940 1941 ldo = &pf->link_dflt_override; 1942 if (ice_get_link_default_override(ldo, pi)) 1943 return; 1944 1945 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 1946 return; 1947 1948 /* Enable Total Port Shutdown (override/replace link-down-on-close 1949 * ethtool private flag) for ports with Port Disable bit set. 1950 */ 1951 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 1952 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 1953 } 1954 1955 /** 1956 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 1957 * @pi: port info structure 1958 * 1959 * If default override is enabled, initialize the user PHY cfg speed and FEC 1960 * settings using the default override mask from the NVM. 1961 * 1962 * The PHY should only be configured with the default override settings the 1963 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state 1964 * is used to indicate that the user PHY cfg default override is initialized 1965 * and the PHY has not been configured with the default override settings. The 1966 * state is set here, and cleared in ice_configure_phy the first time the PHY is 1967 * configured. 1968 * 1969 * This function should be called only if the FW doesn't support default 1970 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. 
1971 */ 1972 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) 1973 { 1974 struct ice_link_default_override_tlv *ldo; 1975 struct ice_aqc_set_phy_cfg_data *cfg; 1976 struct ice_phy_info *phy = &pi->phy; 1977 struct ice_pf *pf = pi->hw->back; 1978 1979 ldo = &pf->link_dflt_override; 1980 1981 /* If link default override is enabled, use to mask NVM PHY capabilities 1982 * for speed and FEC default configuration. 1983 */ 1984 cfg = &phy->curr_user_phy_cfg; 1985 1986 if (ldo->phy_type_low || ldo->phy_type_high) { 1987 cfg->phy_type_low = pf->nvm_phy_type_lo & 1988 cpu_to_le64(ldo->phy_type_low); 1989 cfg->phy_type_high = pf->nvm_phy_type_hi & 1990 cpu_to_le64(ldo->phy_type_high); 1991 } 1992 cfg->link_fec_opt = ldo->fec_options; 1993 phy->curr_user_fec_req = ICE_FEC_AUTO; 1994 1995 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); 1996 } 1997 1998 /** 1999 * ice_init_phy_user_cfg - Initialize the PHY user configuration 2000 * @pi: port info structure 2001 * 2002 * Initialize the current user PHY configuration, speed, FEC, and FC requested 2003 * mode to default. The PHY defaults are from get PHY capabilities topology 2004 * with media so call when media is first available. An error is returned if 2005 * called when media is not available. The PHY initialization completed state is 2006 * set here. 2007 * 2008 * These configurations are used when setting PHY 2009 * configuration. The user PHY configuration is updated on set PHY 2010 * configuration. Returns 0 on success, negative on failure 2011 */ 2012 static int ice_init_phy_user_cfg(struct ice_port_info *pi) 2013 { 2014 struct ice_aqc_get_phy_caps_data *pcaps; 2015 struct ice_phy_info *phy = &pi->phy; 2016 struct ice_pf *pf = pi->hw->back; 2017 int err; 2018 2019 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 2020 return -EIO; 2021 2022 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2023 if (!pcaps) 2024 return -ENOMEM; 2025 2026 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 2027 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 2028 pcaps, NULL); 2029 else 2030 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2031 pcaps, NULL); 2032 if (err) { 2033 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 2034 goto err_out; 2035 } 2036 2037 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); 2038 2039 /* check if lenient mode is supported and enabled */ 2040 if (ice_fw_supports_link_override(pi->hw) && 2041 !(pcaps->module_compliance_enforcement & 2042 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { 2043 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); 2044 2045 /* if the FW supports default PHY configuration mode, then the driver 2046 * does not have to apply link override settings. 
If not, 2047 * initialize user PHY configuration with link override values 2048 */ 2049 if (!ice_fw_supports_report_dflt_cfg(pi->hw) && 2050 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { 2051 ice_init_phy_cfg_dflt_override(pi); 2052 goto out; 2053 } 2054 } 2055 2056 /* if link default override is not enabled, set user flow control and 2057 * FEC settings based on what get_phy_caps returned 2058 */ 2059 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, 2060 pcaps->link_fec_options); 2061 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); 2062 2063 out: 2064 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; 2065 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); 2066 err_out: 2067 kfree(pcaps); 2068 return err; 2069 } 2070 2071 /** 2072 * ice_configure_phy - configure PHY 2073 * @vsi: VSI of PHY 2074 * 2075 * Set the PHY configuration. If the current PHY configuration is the same as 2076 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise 2077 * configure the PHY based on the get PHY capabilities for topology with media. 2078 */ 2079 static int ice_configure_phy(struct ice_vsi *vsi) 2080 { 2081 struct device *dev = ice_pf_to_dev(vsi->back); 2082 struct ice_port_info *pi = vsi->port_info; 2083 struct ice_aqc_get_phy_caps_data *pcaps; 2084 struct ice_aqc_set_phy_cfg_data *cfg; 2085 struct ice_phy_info *phy = &pi->phy; 2086 struct ice_pf *pf = vsi->back; 2087 int err; 2088 2089 /* Ensure we have media as we cannot configure a medialess port */ 2090 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 2091 return -EPERM; 2092 2093 ice_print_topo_conflict(vsi); 2094 2095 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && 2096 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) 2097 return -EPERM; 2098 2099 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) 2100 return ice_force_phys_link_state(vsi, true); 2101 2102 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2103 if (!pcaps) 2104 return -ENOMEM; 2105 2106 /* Get current PHY config */ 2107 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 2108 NULL); 2109 if (err) { 2110 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", 2111 vsi->vsi_num, err); 2112 goto done; 2113 } 2114 2115 /* If PHY enable link is configured and configuration has not changed, 2116 * there's nothing to do 2117 */ 2118 if (pcaps->caps & ICE_AQC_PHY_EN_LINK && 2119 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) 2120 goto done; 2121 2122 /* Use PHY topology as baseline for configuration */ 2123 memset(pcaps, 0, sizeof(*pcaps)); 2124 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 2125 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 2126 pcaps, NULL); 2127 else 2128 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2129 pcaps, NULL); 2130 if (err) { 2131 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", 2132 vsi->vsi_num, err); 2133 goto done; 2134 } 2135 2136 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 2137 if (!cfg) { 2138 err = -ENOMEM; 2139 goto done; 2140 } 2141 2142 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); 2143 2144 /* Speed - If default override pending, use curr_user_phy_cfg set in 2145 * ice_init_phy_cfg_dflt_override(). 
2146 */ 2147 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, 2148 vsi->back->state)) { 2149 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; 2150 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; 2151 } else { 2152 u64 phy_low = 0, phy_high = 0; 2153 2154 ice_update_phy_type(&phy_low, &phy_high, 2155 pi->phy.curr_user_speed_req); 2156 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); 2157 cfg->phy_type_high = pcaps->phy_type_high & 2158 cpu_to_le64(phy_high); 2159 } 2160 2161 /* Can't provide what was requested; use PHY capabilities */ 2162 if (!cfg->phy_type_low && !cfg->phy_type_high) { 2163 cfg->phy_type_low = pcaps->phy_type_low; 2164 cfg->phy_type_high = pcaps->phy_type_high; 2165 } 2166 2167 /* FEC */ 2168 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); 2169 2170 /* Can't provide what was requested; use PHY capabilities */ 2171 if (cfg->link_fec_opt != 2172 (cfg->link_fec_opt & pcaps->link_fec_options)) { 2173 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 2174 cfg->link_fec_opt = pcaps->link_fec_options; 2175 } 2176 2177 /* Flow Control - always supported; no need to check against 2178 * capabilities 2179 */ 2180 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); 2181 2182 /* Enable link and link update */ 2183 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 2184 2185 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); 2186 if (err) 2187 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 2188 vsi->vsi_num, err); 2189 2190 kfree(cfg); 2191 done: 2192 kfree(pcaps); 2193 return err; 2194 } 2195 2196 /** 2197 * ice_check_media_subtask - Check for media 2198 * @pf: pointer to PF struct 2199 * 2200 * If media is available, then initialize the PHY user configuration if it 2201 * has not been done already, and configure the PHY if the interface is up. 2202 */ 2203 static void ice_check_media_subtask(struct ice_pf *pf) 2204 { 2205 struct ice_port_info *pi; 2206 struct ice_vsi *vsi; 2207 int err; 2208 2209 /* No need to check for media if it's already present */ 2210 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) 2211 return; 2212 2213 vsi = ice_get_main_vsi(pf); 2214 if (!vsi) 2215 return; 2216 2217 /* Refresh link info and check if media is present */ 2218 pi = vsi->port_info; 2219 err = ice_update_link_info(pi); 2220 if (err) 2221 return; 2222 2223 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); 2224 2225 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 2226 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) 2227 ice_init_phy_user_cfg(pi); 2228 2229 /* PHY settings are reset on media insertion, reconfigure 2230 * PHY to preserve settings. 
2231 */ 2232 if (test_bit(ICE_VSI_DOWN, vsi->state) && 2233 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 2234 return; 2235 2236 err = ice_configure_phy(vsi); 2237 if (!err) 2238 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 2239 2240 /* A Link Status Event will be generated; the event handler 2241 * will complete bringing the interface up 2242 */ 2243 } 2244 } 2245 2246 /** 2247 * ice_service_task - manage and run subtasks 2248 * @work: pointer to work_struct contained by the PF struct 2249 */ 2250 static void ice_service_task(struct work_struct *work) 2251 { 2252 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2253 unsigned long start_time = jiffies; 2254 2255 /* subtasks */ 2256 2257 /* process reset requests first */ 2258 ice_reset_subtask(pf); 2259 2260 /* bail if a reset/recovery cycle is pending or rebuild failed */ 2261 if (ice_is_reset_in_progress(pf->state) || 2262 test_bit(ICE_SUSPENDED, pf->state) || 2263 test_bit(ICE_NEEDS_RESTART, pf->state)) { 2264 ice_service_task_complete(pf); 2265 return; 2266 } 2267 2268 if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { 2269 /* Plug aux device per request */ 2270 ice_plug_aux_dev(pf); 2271 2272 /* Mark plugging as done but check whether unplug was 2273 * requested during ice_plug_aux_dev() call 2274 * (e.g. from ice_clear_rdma_cap()) and if so then 2275 * unplug aux device. 2276 */ 2277 if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) 2278 ice_unplug_aux_dev(pf); 2279 } 2280 2281 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { 2282 struct iidc_event *event; 2283 2284 event = kzalloc(sizeof(*event), GFP_KERNEL); 2285 if (event) { 2286 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); 2287 ice_send_event_to_aux(pf, event); 2288 kfree(event); 2289 } 2290 } 2291 2292 ice_clean_adminq_subtask(pf); 2293 ice_check_media_subtask(pf); 2294 ice_check_for_hang_subtask(pf); 2295 ice_sync_fltr_subtask(pf); 2296 ice_handle_mdd_event(pf); 2297 ice_watchdog_subtask(pf); 2298 2299 if (ice_is_safe_mode(pf)) { 2300 ice_service_task_complete(pf); 2301 return; 2302 } 2303 2304 ice_process_vflr_event(pf); 2305 ice_clean_mailboxq_subtask(pf); 2306 ice_clean_sbq_subtask(pf); 2307 ice_sync_arfs_fltrs(pf); 2308 ice_flush_fdir_ctx(pf); 2309 2310 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ 2311 ice_service_task_complete(pf); 2312 2313 /* If the tasks have taken longer than one service timer period 2314 * or there is more work to be done, reset the service timer to 2315 * schedule the service task now. 
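 * Passing jiffies as the expiry to mod_timer() below makes the timer
 * fire on the next tick, i.e. the service task is rescheduled
 * immediately.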
2316 */ 2317 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 2318 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || 2319 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2320 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2321 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2322 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2323 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 2324 mod_timer(&pf->serv_tmr, jiffies); 2325 } 2326 2327 /** 2328 * ice_set_ctrlq_len - helper function to set controlq length 2329 * @hw: pointer to the HW instance 2330 */ 2331 static void ice_set_ctrlq_len(struct ice_hw *hw) 2332 { 2333 hw->adminq.num_rq_entries = ICE_AQ_LEN; 2334 hw->adminq.num_sq_entries = ICE_AQ_LEN; 2335 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 2336 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 2337 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; 2338 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2339 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2340 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2341 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2342 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2343 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2344 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2345 } 2346 2347 /** 2348 * ice_schedule_reset - schedule a reset 2349 * @pf: board private structure 2350 * @reset: reset being requested 2351 */ 2352 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 2353 { 2354 struct device *dev = ice_pf_to_dev(pf); 2355 2356 /* bail out if earlier reset has failed */ 2357 if (test_bit(ICE_RESET_FAILED, pf->state)) { 2358 dev_dbg(dev, "earlier reset has failed\n"); 2359 return -EIO; 2360 } 2361 /* bail if reset/recovery already in progress */ 2362 if (ice_is_reset_in_progress(pf->state)) { 2363 dev_dbg(dev, "Reset already in progress\n"); 2364 return -EBUSY; 2365 } 2366 2367 ice_unplug_aux_dev(pf); 2368 2369 switch (reset) { 2370 case ICE_RESET_PFR: 2371 set_bit(ICE_PFR_REQ, pf->state); 2372 break; 2373 case ICE_RESET_CORER: 2374 set_bit(ICE_CORER_REQ, pf->state); 2375 break; 2376 case ICE_RESET_GLOBR: 2377 set_bit(ICE_GLOBR_REQ, pf->state); 2378 break; 2379 default: 2380 return -EINVAL; 2381 } 2382 2383 ice_service_task_schedule(pf); 2384 return 0; 2385 } 2386 2387 /** 2388 * ice_irq_affinity_notify - Callback for affinity changes 2389 * @notify: context as to what irq was changed 2390 * @mask: the new affinity mask 2391 * 2392 * This is a callback function used by the irq_set_affinity_notifier function 2393 * so that we may register to receive changes to the irq affinity masks. 2394 */ 2395 static void 2396 ice_irq_affinity_notify(struct irq_affinity_notify *notify, 2397 const cpumask_t *mask) 2398 { 2399 struct ice_q_vector *q_vector = 2400 container_of(notify, struct ice_q_vector, affinity_notify); 2401 2402 cpumask_copy(&q_vector->affinity_mask, mask); 2403 } 2404 2405 /** 2406 * ice_irq_affinity_release - Callback for affinity notifier release 2407 * @ref: internal core kernel usage 2408 * 2409 * This is a callback function used by the irq_set_affinity_notifier function 2410 * to inform the current notification subscriber that they will no longer 2411 * receive notifications. 
2412 */ 2413 static void ice_irq_affinity_release(struct kref __always_unused *ref) {} 2414 2415 /** 2416 * ice_vsi_ena_irq - Enable IRQ for the given VSI 2417 * @vsi: the VSI being configured 2418 */ 2419 static int ice_vsi_ena_irq(struct ice_vsi *vsi) 2420 { 2421 struct ice_hw *hw = &vsi->back->hw; 2422 int i; 2423 2424 ice_for_each_q_vector(vsi, i) 2425 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 2426 2427 ice_flush(hw); 2428 return 0; 2429 } 2430 2431 /** 2432 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 2433 * @vsi: the VSI being configured 2434 * @basename: name for the vector 2435 */ 2436 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 2437 { 2438 int q_vectors = vsi->num_q_vectors; 2439 struct ice_pf *pf = vsi->back; 2440 int base = vsi->base_vector; 2441 struct device *dev; 2442 int rx_int_idx = 0; 2443 int tx_int_idx = 0; 2444 int vector, err; 2445 int irq_num; 2446 2447 dev = ice_pf_to_dev(pf); 2448 for (vector = 0; vector < q_vectors; vector++) { 2449 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 2450 2451 irq_num = pf->msix_entries[base + vector].vector; 2452 2453 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { 2454 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2455 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2456 tx_int_idx++; 2457 } else if (q_vector->rx.rx_ring) { 2458 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2459 "%s-%s-%d", basename, "rx", rx_int_idx++); 2460 } else if (q_vector->tx.tx_ring) { 2461 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2462 "%s-%s-%d", basename, "tx", tx_int_idx++); 2463 } else { 2464 /* skip this unused q_vector */ 2465 continue; 2466 } 2467 if (vsi->type == ICE_VSI_CTRL && vsi->vf) 2468 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2469 IRQF_SHARED, q_vector->name, 2470 q_vector); 2471 else 2472 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2473 0, q_vector->name, q_vector); 2474 if (err) { 2475 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", 2476 err); 2477 goto free_q_irqs; 2478 } 2479 2480 /* register for affinity change notifications */ 2481 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { 2482 struct irq_affinity_notify *affinity_notify; 2483 2484 affinity_notify = &q_vector->affinity_notify; 2485 affinity_notify->notify = ice_irq_affinity_notify; 2486 affinity_notify->release = ice_irq_affinity_release; 2487 irq_set_affinity_notifier(irq_num, affinity_notify); 2488 } 2489 2490 /* assign the mask for this irq */ 2491 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 2492 } 2493 2494 vsi->irqs_ready = true; 2495 return 0; 2496 2497 free_q_irqs: 2498 while (vector) { 2499 vector--; 2500 irq_num = pf->msix_entries[base + vector].vector; 2501 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 2502 irq_set_affinity_notifier(irq_num, NULL); 2503 irq_set_affinity_hint(irq_num, NULL); /* dev_id must match the q_vector passed to devm_request_irq() above */ 2504 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]); 2505 } 2506 return err; 2507 } 2508 2509 /** 2510 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP 2511 * @vsi: VSI to setup Tx rings used by XDP 2512 * 2513 * Return 0 on success and negative value on error 2514 */ 2515 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) 2516 { 2517 struct device *dev = ice_pf_to_dev(vsi->back); 2518 struct ice_tx_desc *tx_desc; 2519 int i, j; 2520 2521 ice_for_each_xdp_txq(vsi, i) { 2522 u16 xdp_q_idx = vsi->alloc_txq + i; 2523 struct ice_tx_ring *xdp_ring; 2524 2525 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); 2526 2527 if (!xdp_ring) 2528 goto 
free_xdp_rings; 2529 2530 xdp_ring->q_index = xdp_q_idx; 2531 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; 2532 xdp_ring->vsi = vsi; 2533 xdp_ring->netdev = NULL; 2534 xdp_ring->dev = dev; 2535 xdp_ring->count = vsi->num_tx_desc; 2536 xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1; 2537 xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1; 2538 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); 2539 if (ice_setup_tx_ring(xdp_ring)) 2540 goto free_xdp_rings; 2541 ice_set_ring_xdp(xdp_ring); 2542 xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring); 2543 spin_lock_init(&xdp_ring->tx_lock); 2544 for (j = 0; j < xdp_ring->count; j++) { 2545 tx_desc = ICE_TX_DESC(xdp_ring, j); 2546 tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE); 2547 } 2548 } 2549 2550 ice_for_each_rxq(vsi, i) { 2551 if (static_key_enabled(&ice_xdp_locking_key)) 2552 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; 2553 else 2554 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i]; 2555 } 2556 2557 return 0; 2558 2559 free_xdp_rings: 2560 for (; i >= 0; i--) 2561 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) 2562 ice_free_tx_ring(vsi->xdp_rings[i]); 2563 return -ENOMEM; 2564 } 2565 2566 /** 2567 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI 2568 * @vsi: VSI to set the bpf prog on 2569 * @prog: the bpf prog pointer 2570 */ 2571 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) 2572 { 2573 struct bpf_prog *old_prog; 2574 int i; 2575 2576 old_prog = xchg(&vsi->xdp_prog, prog); 2577 if (old_prog) 2578 bpf_prog_put(old_prog); 2579 2580 ice_for_each_rxq(vsi, i) 2581 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 2582 } 2583 2584 /** 2585 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2586 * @vsi: VSI to bring up Tx rings used by XDP 2587 * @prog: bpf program that will be assigned to VSI 2588 * 2589 * Return 0 on success and negative value on error 2590 */ 2591 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) 2592 { 2593 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2594 int xdp_rings_rem = vsi->num_xdp_txq; 2595 struct ice_pf *pf = vsi->back; 2596 struct ice_qs_cfg xdp_qs_cfg = { 2597 .qs_mutex = &pf->avail_q_mutex, 2598 .pf_map = pf->avail_txqs, 2599 .pf_map_size = pf->max_pf_txqs, 2600 .q_count = vsi->num_xdp_txq, 2601 .scatter_count = ICE_MAX_SCATTER_TXQS, 2602 .vsi_map = vsi->txq_map, 2603 .vsi_map_offset = vsi->alloc_txq, 2604 .mapping_mode = ICE_VSI_MAP_CONTIG 2605 }; 2606 struct device *dev; 2607 int i, v_idx; 2608 int status; 2609 2610 dev = ice_pf_to_dev(pf); 2611 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, 2612 sizeof(*vsi->xdp_rings), GFP_KERNEL); 2613 if (!vsi->xdp_rings) 2614 return -ENOMEM; 2615 2616 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; 2617 if (__ice_vsi_get_qs(&xdp_qs_cfg)) 2618 goto err_map_xdp; 2619 2620 if (static_key_enabled(&ice_xdp_locking_key)) 2621 netdev_warn(vsi->netdev, 2622 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); 2623 2624 if (ice_xdp_alloc_setup_rings(vsi)) 2625 goto clear_xdp_rings; 2626 2627 /* follow the logic from ice_vsi_map_rings_to_vectors */ 2628 ice_for_each_q_vector(vsi, v_idx) { 2629 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2630 int xdp_rings_per_v, q_id, q_base; 2631 2632 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, 2633 vsi->num_q_vectors - v_idx); 2634 q_base = vsi->num_xdp_txq - xdp_rings_rem; 2635 2636 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { 2637 
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; 2638 2639 xdp_ring->q_vector = q_vector; 2640 xdp_ring->next = q_vector->tx.tx_ring; 2641 q_vector->tx.tx_ring = xdp_ring; 2642 } 2643 xdp_rings_rem -= xdp_rings_per_v; 2644 } 2645 2646 /* omit the scheduler update if in reset path; XDP queues will be 2647 * taken into account at the end of ice_vsi_rebuild, where 2648 * ice_cfg_vsi_lan is being called 2649 */ 2650 if (ice_is_reset_in_progress(pf->state)) 2651 return 0; 2652 2653 /* tell the Tx scheduler that right now we have 2654 * additional queues 2655 */ 2656 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2657 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; 2658 2659 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2660 max_txqs); 2661 if (status) { 2662 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", 2663 status); 2664 goto clear_xdp_rings; 2665 } 2666 2667 /* assign the prog only when it's not already present on VSI; 2668 * this flow is a subject of both ethtool -L and ndo_bpf flows; 2669 * VSI rebuild that happens under ethtool -L can expose us to 2670 * the bpf_prog refcount issues as we would be swapping same 2671 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put 2672 * on it as it would be treated as an 'old_prog'; for ndo_bpf 2673 * this is not harmful as dev_xdp_install bumps the refcount 2674 * before calling the op exposed by the driver; 2675 */ 2676 if (!ice_is_xdp_ena_vsi(vsi)) 2677 ice_vsi_assign_bpf_prog(vsi, prog); 2678 2679 return 0; 2680 clear_xdp_rings: 2681 ice_for_each_xdp_txq(vsi, i) 2682 if (vsi->xdp_rings[i]) { 2683 kfree_rcu(vsi->xdp_rings[i], rcu); 2684 vsi->xdp_rings[i] = NULL; 2685 } 2686 2687 err_map_xdp: 2688 mutex_lock(&pf->avail_q_mutex); 2689 ice_for_each_xdp_txq(vsi, i) { 2690 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2691 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2692 } 2693 mutex_unlock(&pf->avail_q_mutex); 2694 2695 devm_kfree(dev, vsi->xdp_rings); 2696 return -ENOMEM; 2697 } 2698 2699 /** 2700 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings 2701 * @vsi: VSI to remove XDP rings 2702 * 2703 * Detach XDP rings from irq vectors, clean up the PF bitmap and free 2704 * resources 2705 */ 2706 int ice_destroy_xdp_rings(struct ice_vsi *vsi) 2707 { 2708 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2709 struct ice_pf *pf = vsi->back; 2710 int i, v_idx; 2711 2712 /* q_vectors are freed in reset path so there's no point in detaching 2713 * rings; in case of rebuild being triggered not from reset bits 2714 * in pf->state won't be set, so additionally check first q_vector 2715 * against NULL 2716 */ 2717 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2718 goto free_qmap; 2719 2720 ice_for_each_q_vector(vsi, v_idx) { 2721 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2722 struct ice_tx_ring *ring; 2723 2724 ice_for_each_tx_ring(ring, q_vector->tx) 2725 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2726 break; 2727 2728 /* restore the value of last node prior to XDP setup */ 2729 q_vector->tx.tx_ring = ring; 2730 } 2731 2732 free_qmap: 2733 mutex_lock(&pf->avail_q_mutex); 2734 ice_for_each_xdp_txq(vsi, i) { 2735 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2736 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2737 } 2738 mutex_unlock(&pf->avail_q_mutex); 2739 2740 ice_for_each_xdp_txq(vsi, i) 2741 if (vsi->xdp_rings[i]) { 2742 if (vsi->xdp_rings[i]->desc) 2743 ice_free_tx_ring(vsi->xdp_rings[i]); 2744 
kfree_rcu(vsi->xdp_rings[i], rcu); 2745 vsi->xdp_rings[i] = NULL; 2746 } 2747 2748 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); 2749 vsi->xdp_rings = NULL; 2750 2751 if (static_key_enabled(&ice_xdp_locking_key)) 2752 static_branch_dec(&ice_xdp_locking_key); 2753 2754 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2755 return 0; 2756 2757 ice_vsi_assign_bpf_prog(vsi, NULL); 2758 2759 /* notify Tx scheduler that we destroyed XDP queues and bring 2760 * back the old number of child nodes 2761 */ 2762 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2763 max_txqs[i] = vsi->num_txq; 2764 2765 /* change number of XDP Tx queues to 0 */ 2766 vsi->num_xdp_txq = 0; 2767 2768 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2769 max_txqs); 2770 } 2771 2772 /** 2773 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI 2774 * @vsi: VSI to schedule napi on 2775 */ 2776 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) 2777 { 2778 int i; 2779 2780 ice_for_each_rxq(vsi, i) { 2781 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; 2782 2783 if (rx_ring->xsk_pool) 2784 napi_schedule(&rx_ring->q_vector->napi); 2785 } 2786 } 2787 2788 /** 2789 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have 2790 * @vsi: VSI to determine the count of XDP Tx qs 2791 * 2792 * returns 0 if Tx qs count is higher than at least half of CPU count, 2793 * -ENOMEM otherwise 2794 */ 2795 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) 2796 { 2797 u16 avail = ice_get_avail_txq_count(vsi->back); 2798 u16 cpus = num_possible_cpus(); 2799 2800 if (avail < cpus / 2) 2801 return -ENOMEM; 2802 2803 vsi->num_xdp_txq = min_t(u16, avail, cpus); 2804 2805 if (vsi->num_xdp_txq < cpus) 2806 static_branch_inc(&ice_xdp_locking_key); 2807 2808 return 0; 2809 } 2810 2811 /** 2812 * ice_xdp_setup_prog - Add or remove XDP eBPF program 2813 * @vsi: VSI to setup XDP for 2814 * @prog: XDP program 2815 * @extack: netlink extended ack 2816 */ 2817 static int 2818 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, 2819 struct netlink_ext_ack *extack) 2820 { 2821 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; 2822 bool if_running = netif_running(vsi->netdev); 2823 int ret = 0, xdp_ring_err = 0; 2824 2825 if (frame_size > vsi->rx_buf_len) { 2826 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); 2827 return -EOPNOTSUPP; 2828 } 2829 2830 /* need to stop netdev while setting up the program for Rx rings */ 2831 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 2832 ret = ice_down(vsi); 2833 if (ret) { 2834 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); 2835 return ret; 2836 } 2837 } 2838 2839 if (!ice_is_xdp_ena_vsi(vsi) && prog) { 2840 xdp_ring_err = ice_vsi_determine_xdp_res(vsi); 2841 if (xdp_ring_err) { 2842 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); 2843 } else { 2844 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); 2845 if (xdp_ring_err) 2846 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); 2847 } 2848 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { 2849 xdp_ring_err = ice_destroy_xdp_rings(vsi); 2850 if (xdp_ring_err) 2851 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); 2852 } else { 2853 /* safe to call even when prog == vsi->xdp_prog as 2854 * dev_xdp_install in net/core/dev.c incremented prog's 2855 * refcount so corresponding bpf_prog_put won't cause 2856 * underflow 2857 */ 2858 ice_vsi_assign_bpf_prog(vsi, prog); 2859 } 2860 2861 if (if_running) 2862 ret = 
ice_up(vsi); 2863 2864 if (!ret && prog) 2865 ice_vsi_rx_napi_schedule(vsi); 2866 2867 return (ret || xdp_ring_err) ? -ENOMEM : 0; 2868 } 2869 2870 /** 2871 * ice_xdp_safe_mode - XDP handler for safe mode 2872 * @dev: netdevice 2873 * @xdp: XDP command 2874 */ 2875 static int ice_xdp_safe_mode(struct net_device __always_unused *dev, 2876 struct netdev_bpf *xdp) 2877 { 2878 NL_SET_ERR_MSG_MOD(xdp->extack, 2879 "Please provide working DDP firmware package in order to use XDP\n" 2880 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); 2881 return -EOPNOTSUPP; 2882 } 2883 2884 /** 2885 * ice_xdp - implements XDP handler 2886 * @dev: netdevice 2887 * @xdp: XDP command 2888 */ 2889 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2890 { 2891 struct ice_netdev_priv *np = netdev_priv(dev); 2892 struct ice_vsi *vsi = np->vsi; 2893 2894 if (vsi->type != ICE_VSI_PF) { 2895 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); 2896 return -EINVAL; 2897 } 2898 2899 switch (xdp->command) { 2900 case XDP_SETUP_PROG: 2901 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); 2902 case XDP_SETUP_XSK_POOL: 2903 return ice_xsk_pool_setup(vsi, xdp->xsk.pool, 2904 xdp->xsk.queue_id); 2905 default: 2906 return -EINVAL; 2907 } 2908 } 2909 2910 /** 2911 * ice_ena_misc_vector - enable the non-queue interrupts 2912 * @pf: board private structure 2913 */ 2914 static void ice_ena_misc_vector(struct ice_pf *pf) 2915 { 2916 struct ice_hw *hw = &pf->hw; 2917 u32 val; 2918 2919 /* Disable anti-spoof detection interrupt to prevent spurious event 2920 * interrupts during a function reset. Anti-spoof functionality is 2921 * still supported. 2922 */ 2923 val = rd32(hw, GL_MDCK_TX_TDPU); 2924 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; 2925 wr32(hw, GL_MDCK_TX_TDPU, val); 2926 2927 /* clear things first */ 2928 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ 2929 rd32(hw, PFINT_OICR); /* read to clear */ 2930 2931 val = (PFINT_OICR_ECC_ERR_M | 2932 PFINT_OICR_MAL_DETECT_M | 2933 PFINT_OICR_GRST_M | 2934 PFINT_OICR_PCI_EXCEPTION_M | 2935 PFINT_OICR_VFLR_M | 2936 PFINT_OICR_HMC_ERR_M | 2937 PFINT_OICR_PE_PUSH_M | 2938 PFINT_OICR_PE_CRITERR_M); 2939 2940 wr32(hw, PFINT_OICR_ENA, val); 2941 2942 /* SW_ITR_IDX = 0, but don't change INTENA */ 2943 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), 2944 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); 2945 } 2946 2947 /** 2948 * ice_misc_intr - misc interrupt handler 2949 * @irq: interrupt number 2950 * @data: pointer to the PF structure passed to devm_request_irq() 2951 */ 2952 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) 2953 { 2954 struct ice_pf *pf = (struct ice_pf *)data; 2955 struct ice_hw *hw = &pf->hw; 2956 irqreturn_t ret = IRQ_NONE; 2957 struct device *dev; 2958 u32 oicr, ena_mask; 2959 2960 dev = ice_pf_to_dev(pf); 2961 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 2962 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); 2963 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 2964 2965 oicr = rd32(hw, PFINT_OICR); 2966 ena_mask = rd32(hw, PFINT_OICR_ENA); 2967 2968 if (oicr & PFINT_OICR_SWINT_M) { 2969 ena_mask &= ~PFINT_OICR_SWINT_M; 2970 pf->sw_int_count++; 2971 } 2972 2973 if (oicr & PFINT_OICR_MAL_DETECT_M) { 2974 ena_mask &= ~PFINT_OICR_MAL_DETECT_M; 2975 set_bit(ICE_MDD_EVENT_PENDING, pf->state); 2976 } 2977 if (oicr & PFINT_OICR_VFLR_M) { 2978 /* disable any further VFLR event notifications */ 2979 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 2980 u32 reg = rd32(hw, PFINT_OICR_ENA); 2981 2982 reg &= 
~PFINT_OICR_VFLR_M; 2983 wr32(hw, PFINT_OICR_ENA, reg); 2984 } else { 2985 ena_mask &= ~PFINT_OICR_VFLR_M; 2986 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); 2987 } 2988 } 2989 2990 if (oicr & PFINT_OICR_GRST_M) { 2991 u32 reset; 2992 2993 /* we have a reset warning */ 2994 ena_mask &= ~PFINT_OICR_GRST_M; 2995 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> 2996 GLGEN_RSTAT_RESET_TYPE_S; 2997 2998 if (reset == ICE_RESET_CORER) 2999 pf->corer_count++; 3000 else if (reset == ICE_RESET_GLOBR) 3001 pf->globr_count++; 3002 else if (reset == ICE_RESET_EMPR) 3003 pf->empr_count++; 3004 else 3005 dev_dbg(dev, "Invalid reset type %d\n", reset); 3006 3007 /* If a reset cycle isn't already in progress, we set a bit in 3008 * pf->state so that the service task can start a reset/rebuild. 3009 */ 3010 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { 3011 if (reset == ICE_RESET_CORER) 3012 set_bit(ICE_CORER_RECV, pf->state); 3013 else if (reset == ICE_RESET_GLOBR) 3014 set_bit(ICE_GLOBR_RECV, pf->state); 3015 else 3016 set_bit(ICE_EMPR_RECV, pf->state); 3017 3018 /* There are a couple of different bits at play here. 3019 * hw->reset_ongoing indicates whether the hardware is 3020 * in reset. This is set to true when a reset interrupt 3021 * is received and set back to false after the driver 3022 * has determined that the hardware is out of reset. 3023 * 3024 * ICE_RESET_OICR_RECV in pf->state indicates 3025 * that a post reset rebuild is required before the 3026 * driver is operational again. This is set above. 3027 * 3028 * As this is the start of the reset/rebuild cycle, set 3029 * both to indicate that. 3030 */ 3031 hw->reset_ongoing = true; 3032 } 3033 } 3034 3035 if (oicr & PFINT_OICR_TSYN_TX_M) { 3036 ena_mask &= ~PFINT_OICR_TSYN_TX_M; 3037 ice_ptp_process_ts(pf); 3038 } 3039 3040 if (oicr & PFINT_OICR_TSYN_EVNT_M) { 3041 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3042 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); 3043 3044 /* Save EVENTs from GTSYN register */ 3045 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | 3046 GLTSYN_STAT_EVENT1_M | 3047 GLTSYN_STAT_EVENT2_M); 3048 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; 3049 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); 3050 } 3051 3052 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) 3053 if (oicr & ICE_AUX_CRIT_ERR) { 3054 struct iidc_event *event; 3055 3056 ena_mask &= ~ICE_AUX_CRIT_ERR; 3057 event = kzalloc(sizeof(*event), GFP_ATOMIC); 3058 if (event) { 3059 set_bit(IIDC_EVENT_CRIT_ERR, event->type); 3060 /* report the entire OICR value to AUX driver */ 3061 event->reg = oicr; 3062 ice_send_event_to_aux(pf, event); 3063 kfree(event); 3064 } 3065 } 3066 3067 /* Report any remaining unexpected interrupts */ 3068 oicr &= ena_mask; 3069 if (oicr) { 3070 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); 3071 /* If a critical error is pending there is no choice but to 3072 * reset the device. 
3073 */ 3074 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | 3075 PFINT_OICR_ECC_ERR_M)) { 3076 set_bit(ICE_PFR_REQ, pf->state); 3077 ice_service_task_schedule(pf); 3078 } 3079 } 3080 ret = IRQ_HANDLED; 3081 3082 ice_service_task_schedule(pf); 3083 ice_irq_dynamic_ena(hw, NULL, NULL); 3084 3085 return ret; 3086 } 3087 3088 /** 3089 * ice_dis_ctrlq_interrupts - disable control queue interrupts 3090 * @hw: pointer to HW structure 3091 */ 3092 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 3093 { 3094 /* disable Admin queue Interrupt causes */ 3095 wr32(hw, PFINT_FW_CTL, 3096 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 3097 3098 /* disable Mailbox queue Interrupt causes */ 3099 wr32(hw, PFINT_MBX_CTL, 3100 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 3101 3102 wr32(hw, PFINT_SB_CTL, 3103 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); 3104 3105 /* disable Control queue Interrupt causes */ 3106 wr32(hw, PFINT_OICR_CTL, 3107 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 3108 3109 ice_flush(hw); 3110 } 3111 3112 /** 3113 * ice_free_irq_msix_misc - Unroll misc vector setup 3114 * @pf: board private structure 3115 */ 3116 static void ice_free_irq_msix_misc(struct ice_pf *pf) 3117 { 3118 struct ice_hw *hw = &pf->hw; 3119 3120 ice_dis_ctrlq_interrupts(hw); 3121 3122 /* disable OICR interrupt */ 3123 wr32(hw, PFINT_OICR_ENA, 0); 3124 ice_flush(hw); 3125 3126 if (pf->msix_entries) { 3127 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); 3128 devm_free_irq(ice_pf_to_dev(pf), 3129 pf->msix_entries[pf->oicr_idx].vector, pf); 3130 } 3131 3132 pf->num_avail_sw_msix += 1; 3133 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); 3134 } 3135 3136 /** 3137 * ice_ena_ctrlq_interrupts - enable control queue interrupts 3138 * @hw: pointer to HW structure 3139 * @reg_idx: HW vector index to associate the control queue interrupts with 3140 */ 3141 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) 3142 { 3143 u32 val; 3144 3145 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 3146 PFINT_OICR_CTL_CAUSE_ENA_M); 3147 wr32(hw, PFINT_OICR_CTL, val); 3148 3149 /* enable Admin queue Interrupt causes */ 3150 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 3151 PFINT_FW_CTL_CAUSE_ENA_M); 3152 wr32(hw, PFINT_FW_CTL, val); 3153 3154 /* enable Mailbox queue Interrupt causes */ 3155 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 3156 PFINT_MBX_CTL_CAUSE_ENA_M); 3157 wr32(hw, PFINT_MBX_CTL, val); 3158 3159 /* This enables Sideband queue Interrupt causes */ 3160 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 3161 PFINT_SB_CTL_CAUSE_ENA_M); 3162 wr32(hw, PFINT_SB_CTL, val); 3163 3164 ice_flush(hw); 3165 } 3166 3167 /** 3168 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 3169 * @pf: board private structure 3170 * 3171 * This sets up the handler for MSIX 0, which is used to manage the 3172 * non-queue interrupts, e.g. AdminQ and errors. This is not used 3173 * when in MSI or Legacy interrupt mode. 3174 */ 3175 static int ice_req_irq_msix_misc(struct ice_pf *pf) 3176 { 3177 struct device *dev = ice_pf_to_dev(pf); 3178 struct ice_hw *hw = &pf->hw; 3179 int oicr_idx, err = 0; 3180 3181 if (!pf->int_name[0]) 3182 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 3183 dev_driver_string(dev), dev_name(dev)); 3184 3185 /* Do not request IRQ but do enable OICR interrupt since settings are 3186 * lost during reset. Note that this function is called only during 3187 * rebuild path and not while reset is in progress. 
3188 */ 3189 if (ice_is_reset_in_progress(pf->state)) 3190 goto skip_req_irq; 3191 3192 /* reserve one vector in irq_tracker for misc interrupts */ 3193 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 3194 if (oicr_idx < 0) 3195 return oicr_idx; 3196 3197 pf->num_avail_sw_msix -= 1; 3198 pf->oicr_idx = (u16)oicr_idx; 3199 3200 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, 3201 ice_misc_intr, 0, pf->int_name, pf); 3202 if (err) { 3203 dev_err(dev, "devm_request_irq for %s failed: %d\n", 3204 pf->int_name, err); 3205 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 3206 pf->num_avail_sw_msix += 1; 3207 return err; 3208 } 3209 3210 skip_req_irq: 3211 ice_ena_misc_vector(pf); 3212 3213 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); 3214 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), 3215 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 3216 3217 ice_flush(hw); 3218 ice_irq_dynamic_ena(hw, NULL, NULL); 3219 3220 return 0; 3221 } 3222 3223 /** 3224 * ice_napi_add - register NAPI handler for the VSI 3225 * @vsi: VSI for which NAPI handler is to be registered 3226 * 3227 * This function is only called in the driver's load path. Registering the NAPI 3228 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 3229 * reset/rebuild, etc.) 3230 */ 3231 static void ice_napi_add(struct ice_vsi *vsi) 3232 { 3233 int v_idx; 3234 3235 if (!vsi->netdev) 3236 return; 3237 3238 ice_for_each_q_vector(vsi, v_idx) 3239 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, 3240 ice_napi_poll, NAPI_POLL_WEIGHT); 3241 } 3242 3243 /** 3244 * ice_set_ops - set netdev and ethtool ops for the given netdev 3245 * @netdev: netdev instance 3246 */ 3247 static void ice_set_ops(struct net_device *netdev) 3248 { 3249 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3250 3251 if (ice_is_safe_mode(pf)) { 3252 netdev->netdev_ops = &ice_netdev_safe_mode_ops; 3253 ice_set_ethtool_safe_mode_ops(netdev); 3254 return; 3255 } 3256 3257 netdev->netdev_ops = &ice_netdev_ops; 3258 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; 3259 ice_set_ethtool_ops(netdev); 3260 } 3261 3262 /** 3263 * ice_set_netdev_features - set features for the given netdev 3264 * @netdev: netdev instance 3265 */ 3266 static void ice_set_netdev_features(struct net_device *netdev) 3267 { 3268 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3269 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); 3270 netdev_features_t csumo_features; 3271 netdev_features_t vlano_features; 3272 netdev_features_t dflt_features; 3273 netdev_features_t tso_features; 3274 3275 if (ice_is_safe_mode(pf)) { 3276 /* safe mode */ 3277 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 3278 netdev->hw_features = netdev->features; 3279 return; 3280 } 3281 3282 dflt_features = NETIF_F_SG | 3283 NETIF_F_HIGHDMA | 3284 NETIF_F_NTUPLE | 3285 NETIF_F_RXHASH; 3286 3287 csumo_features = NETIF_F_RXCSUM | 3288 NETIF_F_IP_CSUM | 3289 NETIF_F_SCTP_CRC | 3290 NETIF_F_IPV6_CSUM; 3291 3292 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 3293 NETIF_F_HW_VLAN_CTAG_TX | 3294 NETIF_F_HW_VLAN_CTAG_RX; 3295 3296 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */ 3297 if (is_dvm_ena) 3298 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER; 3299 3300 tso_features = NETIF_F_TSO | 3301 NETIF_F_TSO_ECN | 3302 NETIF_F_TSO6 | 3303 NETIF_F_GSO_GRE | 3304 NETIF_F_GSO_UDP_TUNNEL | 3305 NETIF_F_GSO_GRE_CSUM | 3306 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3307 NETIF_F_GSO_PARTIAL | 3308 NETIF_F_GSO_IPXIP4 | 3309 NETIF_F_GSO_IPXIP6 | 3310 NETIF_F_GSO_UDP_L4; 3311 
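/* The GRE and UDP tunnel checksum GSO types above can only be offloaded
 * via NETIF_F_GSO_PARTIAL, where the stack prepares the outer headers
 * once and the device replicates them per segment; list them in
 * gso_partial_features so the stack takes that path.
 */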
3312 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | 3313 NETIF_F_GSO_GRE_CSUM; 3314 /* set features that user can change */ 3315 netdev->hw_features = dflt_features | csumo_features | 3316 vlano_features | tso_features; 3317 3318 /* add support for HW_CSUM on packets with MPLS header */ 3319 netdev->mpls_features = NETIF_F_HW_CSUM; 3320 3321 /* enable features */ 3322 netdev->features |= netdev->hw_features; 3323 3324 netdev->hw_features |= NETIF_F_HW_TC; 3325 3326 /* encap and VLAN devices inherit default, csumo and tso features */ 3327 netdev->hw_enc_features |= dflt_features | csumo_features | 3328 tso_features; 3329 netdev->vlan_features |= dflt_features | csumo_features | 3330 tso_features; 3331 3332 /* advertise support but don't enable by default since only one type of 3333 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one 3334 * type turns on the other has to be turned off. This is enforced by the 3335 * ice_fix_features() ndo callback. 3336 */ 3337 if (is_dvm_ena) 3338 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | 3339 NETIF_F_HW_VLAN_STAG_TX; 3340 } 3341 3342 /** 3343 * ice_cfg_netdev - Allocate, configure and register a netdev 3344 * @vsi: the VSI associated with the new netdev 3345 * 3346 * Returns 0 on success, negative value on failure 3347 */ 3348 static int ice_cfg_netdev(struct ice_vsi *vsi) 3349 { 3350 struct ice_netdev_priv *np; 3351 struct net_device *netdev; 3352 u8 mac_addr[ETH_ALEN]; 3353 3354 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 3355 vsi->alloc_rxq); 3356 if (!netdev) 3357 return -ENOMEM; 3358 3359 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3360 vsi->netdev = netdev; 3361 np = netdev_priv(netdev); 3362 np->vsi = vsi; 3363 3364 ice_set_netdev_features(netdev); 3365 3366 ice_set_ops(netdev); 3367 3368 if (vsi->type == ICE_VSI_PF) { 3369 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); 3370 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 3371 eth_hw_addr_set(netdev, mac_addr); 3372 ether_addr_copy(netdev->perm_addr, mac_addr); 3373 } 3374 3375 netdev->priv_flags |= IFF_UNICAST_FLT; 3376 3377 /* Setup netdev TC information */ 3378 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 3379 3380 /* setup watchdog timeout value to be 5 seconds */ 3381 netdev->watchdog_timeo = 5 * HZ; 3382 3383 netdev->min_mtu = ETH_MIN_MTU; 3384 netdev->max_mtu = ICE_MAX_MTU; 3385 3386 return 0; 3387 } 3388 3389 /** 3390 * ice_fill_rss_lut - Fill the RSS lookup table with default values 3391 * @lut: Lookup table 3392 * @rss_table_size: Lookup table size 3393 * @rss_size: Range of queue number for hashing 3394 */ 3395 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 3396 { 3397 u16 i; 3398 3399 for (i = 0; i < rss_table_size; i++) 3400 lut[i] = i % rss_size; 3401 } 3402 3403 /** 3404 * ice_pf_vsi_setup - Set up a PF VSI 3405 * @pf: board private structure 3406 * @pi: pointer to the port_info instance 3407 * 3408 * Returns pointer to the successfully allocated VSI software struct 3409 * on success, otherwise returns NULL on failure. 
3410 */ 3411 static struct ice_vsi * 3412 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3413 { 3414 return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL); 3415 } 3416 3417 static struct ice_vsi * 3418 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 3419 struct ice_channel *ch) 3420 { 3421 return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch); 3422 } 3423 3424 /** 3425 * ice_ctrl_vsi_setup - Set up a control VSI 3426 * @pf: board private structure 3427 * @pi: pointer to the port_info instance 3428 * 3429 * Returns pointer to the successfully allocated VSI software struct 3430 * on success, otherwise returns NULL on failure. 3431 */ 3432 static struct ice_vsi * 3433 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3434 { 3435 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL); 3436 } 3437 3438 /** 3439 * ice_lb_vsi_setup - Set up a loopback VSI 3440 * @pf: board private structure 3441 * @pi: pointer to the port_info instance 3442 * 3443 * Returns pointer to the successfully allocated VSI software struct 3444 * on success, otherwise returns NULL on failure. 3445 */ 3446 struct ice_vsi * 3447 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3448 { 3449 return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL); 3450 } 3451 3452 /** 3453 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 3454 * @netdev: network interface to be adjusted 3455 * @proto: VLAN TPID 3456 * @vid: VLAN ID to be added 3457 * 3458 * net_device_ops implementation for adding VLAN IDs 3459 */ 3460 static int 3461 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 3462 { 3463 struct ice_netdev_priv *np = netdev_priv(netdev); 3464 struct ice_vsi_vlan_ops *vlan_ops; 3465 struct ice_vsi *vsi = np->vsi; 3466 struct ice_vlan vlan; 3467 int ret; 3468 3469 /* VLAN 0 is added by default during load/reset */ 3470 if (!vid) 3471 return 0; 3472 3473 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3474 3475 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged 3476 * packets aren't pruned by the device's internal switch on Rx 3477 */ 3478 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); 3479 ret = vlan_ops->add_vlan(vsi, &vlan); 3480 if (!ret) 3481 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 3482 3483 return ret; 3484 } 3485 3486 /** 3487 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload 3488 * @netdev: network interface to be adjusted 3489 * @proto: VLAN TPID 3490 * @vid: VLAN ID to be removed 3491 * 3492 * net_device_ops implementation for removing VLAN IDs 3493 */ 3494 static int 3495 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) 3496 { 3497 struct ice_netdev_priv *np = netdev_priv(netdev); 3498 struct ice_vsi_vlan_ops *vlan_ops; 3499 struct ice_vsi *vsi = np->vsi; 3500 struct ice_vlan vlan; 3501 int ret; 3502 3503 /* don't allow removal of VLAN 0 */ 3504 if (!vid) 3505 return 0; 3506 3507 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3508 3509 /* Make sure VLAN delete is successful before updating VLAN 3510 * information 3511 */ 3512 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); 3513 ret = vlan_ops->del_vlan(vsi, &vlan); 3514 if (ret) 3515 return ret; 3516 3517 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 3518 return 0; 3519 } 3520 3521 /** 3522 * ice_rep_indr_tc_block_unbind 3523 * @cb_priv: indirection block private data 3524 */ 3525 static void ice_rep_indr_tc_block_unbind(void *cb_priv) 3526 { 3527 struct ice_indr_block_priv *indr_priv = cb_priv; 3528 3529 list_del(&indr_priv->list); 3530 
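/* the entry was unlinked above; now release its memory */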
kfree(indr_priv); 3531 } 3532 3533 /** 3534 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications 3535 * @vsi: VSI struct which has the netdev 3536 */ 3537 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) 3538 { 3539 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); 3540 3541 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, 3542 ice_rep_indr_tc_block_unbind); 3543 } 3544 3545 /** 3546 * ice_tc_indir_block_remove - clean indirect TC block notifications 3547 * @pf: PF structure 3548 */ 3549 static void ice_tc_indir_block_remove(struct ice_pf *pf) 3550 { 3551 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); 3552 3553 if (!pf_vsi) 3554 return; 3555 3556 ice_tc_indir_block_unregister(pf_vsi); 3557 } 3558 3559 /** 3560 * ice_tc_indir_block_register - Register TC indirect block notifications 3561 * @vsi: VSI struct which has the netdev 3562 * 3563 * Returns 0 on success, negative value on failure 3564 */ 3565 static int ice_tc_indir_block_register(struct ice_vsi *vsi) 3566 { 3567 struct ice_netdev_priv *np; 3568 3569 if (!vsi || !vsi->netdev) 3570 return -EINVAL; 3571 3572 np = netdev_priv(vsi->netdev); 3573 3574 INIT_LIST_HEAD(&np->tc_indr_block_priv_list); 3575 return flow_indr_dev_register(ice_indr_setup_tc_cb, np); 3576 } 3577 3578 /** 3579 * ice_setup_pf_sw - Setup the HW switch on startup or after reset 3580 * @pf: board private structure 3581 * 3582 * Returns 0 on success, negative value on failure 3583 */ 3584 static int ice_setup_pf_sw(struct ice_pf *pf) 3585 { 3586 struct device *dev = ice_pf_to_dev(pf); 3587 bool dvm = ice_is_dvm_ena(&pf->hw); 3588 struct ice_vsi *vsi; 3589 int status; 3590 3591 if (ice_is_reset_in_progress(pf->state)) 3592 return -EBUSY; 3593 3594 status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 3595 if (status) 3596 return -EIO; 3597 3598 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 3599 if (!vsi) 3600 return -ENOMEM; 3601 3602 /* init channel list */ 3603 INIT_LIST_HEAD(&vsi->ch_list); 3604 3605 status = ice_cfg_netdev(vsi); 3606 if (status) 3607 goto unroll_vsi_setup; 3608 /* netdev has to be configured before setting frame size */ 3609 ice_vsi_cfg_frame_size(vsi); 3610 3611 /* init indirect block notifications */ 3612 status = ice_tc_indir_block_register(vsi); 3613 if (status) { 3614 dev_err(dev, "Failed to register netdev notifier\n"); 3615 goto unroll_cfg_netdev; 3616 } 3617 3618 /* Setup DCB netlink interface */ 3619 ice_dcbnl_setup(vsi); 3620 3621 /* registering the NAPI handler requires both the queues and 3622 * netdev to be created, which are done in ice_pf_vsi_setup() 3623 * and ice_cfg_netdev() respectively 3624 */ 3625 ice_napi_add(vsi); 3626 3627 status = ice_set_cpu_rx_rmap(vsi); 3628 if (status) { 3629 dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", 3630 vsi->vsi_num, status); 3631 goto unroll_napi_add; 3632 } 3633 status = ice_init_mac_fltr(pf); 3634 if (status) 3635 goto free_cpu_rx_map; 3636 3637 return 0; 3638 3639 free_cpu_rx_map: 3640 ice_free_cpu_rx_rmap(vsi); 3641 unroll_napi_add: 3642 ice_tc_indir_block_unregister(vsi); 3643 unroll_cfg_netdev: 3644 if (vsi) { 3645 ice_napi_del(vsi); 3646 if (vsi->netdev) { 3647 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3648 free_netdev(vsi->netdev); 3649 vsi->netdev = NULL; 3650 } 3651 } 3652 3653 unroll_vsi_setup: 3654 ice_vsi_release(vsi); 3655 return status; 3656 } 3657 3658 /** 3659 * ice_get_avail_q_count - Get count of available queues 3660 * @pf_qmap: bitmap to count available queues from 3661 * @lock: pointer to a mutex that protects 
access to pf_qmap 3662 * @size: size of the bitmap 3663 */ 3664 static u16 3665 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 3666 { 3667 unsigned long bit; 3668 u16 count = 0; 3669 3670 mutex_lock(lock); 3671 for_each_clear_bit(bit, pf_qmap, size) 3672 count++; 3673 mutex_unlock(lock); 3674 3675 return count; 3676 } 3677 3678 /** 3679 * ice_get_avail_txq_count - Get count of available Tx queues 3680 * @pf: pointer to an ice_pf instance 3681 */ 3682 u16 ice_get_avail_txq_count(struct ice_pf *pf) 3683 { 3684 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 3685 pf->max_pf_txqs); 3686 } 3687 3688 /** 3689 * ice_get_avail_rxq_count - Get count of available Rx queues 3690 * @pf: pointer to an ice_pf instance 3691 */ 3692 u16 ice_get_avail_rxq_count(struct ice_pf *pf) 3693 { 3694 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 3695 pf->max_pf_rxqs); 3696 } 3697 3698 /** 3699 * ice_deinit_pf - Unrolls initializations done by ice_init_pf 3700 * @pf: board private structure to de-initialize 3701 */ 3702 static void ice_deinit_pf(struct ice_pf *pf) 3703 { 3704 ice_service_task_stop(pf); 3705 mutex_destroy(&pf->sw_mutex); 3706 mutex_destroy(&pf->tc_mutex); 3707 mutex_destroy(&pf->avail_q_mutex); 3708 mutex_destroy(&pf->vfs.table_lock); 3709 3710 if (pf->avail_txqs) { 3711 bitmap_free(pf->avail_txqs); 3712 pf->avail_txqs = NULL; 3713 } 3714 3715 if (pf->avail_rxqs) { 3716 bitmap_free(pf->avail_rxqs); 3717 pf->avail_rxqs = NULL; 3718 } 3719 3720 if (pf->ptp.clock) 3721 ptp_clock_unregister(pf->ptp.clock); 3722 } 3723 3724 /** 3725 * ice_set_pf_caps - set PF's capability flags 3726 * @pf: pointer to the PF instance 3727 */ 3728 static void ice_set_pf_caps(struct ice_pf *pf) 3729 { 3730 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 3731 3732 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3733 if (func_caps->common_cap.rdma) 3734 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3735 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3736 if (func_caps->common_cap.dcb) 3737 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3738 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3739 if (func_caps->common_cap.sr_iov_1_1) { 3740 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3741 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, 3742 ICE_MAX_VF_COUNT); 3743 } 3744 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 3745 if (func_caps->common_cap.rss_table_size) 3746 set_bit(ICE_FLAG_RSS_ENA, pf->flags); 3747 3748 clear_bit(ICE_FLAG_FD_ENA, pf->flags); 3749 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 3750 u16 unused; 3751 3752 /* ctrl_vsi_idx will be set to a valid value when flow director 3753 * is setup by ice_init_fdir 3754 */ 3755 pf->ctrl_vsi_idx = ICE_NO_VSI; 3756 set_bit(ICE_FLAG_FD_ENA, pf->flags); 3757 /* force guaranteed filter pool for PF */ 3758 ice_alloc_fd_guar_item(&pf->hw, &unused, 3759 func_caps->fd_fltr_guar); 3760 /* force shared filter pool for PF */ 3761 ice_alloc_fd_shrd_item(&pf->hw, &unused, 3762 func_caps->fd_fltr_best_effort); 3763 } 3764 3765 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3766 if (func_caps->common_cap.ieee_1588) 3767 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3768 3769 pf->max_pf_txqs = func_caps->common_cap.num_txq; 3770 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 3771 } 3772 3773 /** 3774 * ice_init_pf - Initialize general software structures (struct ice_pf) 3775 * @pf: board private structure to initialize 3776 */ 3777 static int ice_init_pf(struct ice_pf *pf) 3778 { 3779 ice_set_pf_caps(pf); 3780 3781 
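/* init PF-scope locks: sw_mutex for the VSI alloc flow, tc_mutex for TC
 * configuration changes
 */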
mutex_init(&pf->sw_mutex); 3782 mutex_init(&pf->tc_mutex); 3783 3784 INIT_HLIST_HEAD(&pf->aq_wait_list); 3785 spin_lock_init(&pf->aq_wait_lock); 3786 init_waitqueue_head(&pf->aq_wait_queue); 3787 3788 init_waitqueue_head(&pf->reset_wait_queue); 3789 3790 /* setup service timer and periodic service task */ 3791 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 3792 pf->serv_tmr_period = HZ; 3793 INIT_WORK(&pf->serv_task, ice_service_task); 3794 clear_bit(ICE_SERVICE_SCHED, pf->state); 3795 3796 mutex_init(&pf->avail_q_mutex); 3797 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 3798 if (!pf->avail_txqs) 3799 return -ENOMEM; 3800 3801 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 3802 if (!pf->avail_rxqs) { 3803 devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); 3804 pf->avail_txqs = NULL; 3805 return -ENOMEM; 3806 } 3807 3808 mutex_init(&pf->vfs.table_lock); 3809 hash_init(pf->vfs.table); 3810 3811 return 0; 3812 } 3813 3814 /** 3815 * ice_ena_msix_range - Request a range of MSIX vectors from the OS 3816 * @pf: board private structure 3817 * 3818 * compute the number of MSIX vectors required (v_budget) and request from 3819 * the OS. Return the number of vectors reserved or negative on failure 3820 */ 3821 static int ice_ena_msix_range(struct ice_pf *pf) 3822 { 3823 int num_cpus, v_left, v_actual, v_other, v_budget = 0; 3824 struct device *dev = ice_pf_to_dev(pf); 3825 int needed, err, i; 3826 3827 v_left = pf->hw.func_caps.common_cap.num_msix_vectors; 3828 num_cpus = num_online_cpus(); 3829 3830 /* reserve for LAN miscellaneous handler */ 3831 needed = ICE_MIN_LAN_OICR_MSIX; 3832 if (v_left < needed) 3833 goto no_hw_vecs_left_err; 3834 v_budget += needed; 3835 v_left -= needed; 3836 3837 /* reserve for flow director */ 3838 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 3839 needed = ICE_FDIR_MSIX; 3840 if (v_left < needed) 3841 goto no_hw_vecs_left_err; 3842 v_budget += needed; 3843 v_left -= needed; 3844 } 3845 3846 /* reserve for switchdev */ 3847 needed = ICE_ESWITCH_MSIX; 3848 if (v_left < needed) 3849 goto no_hw_vecs_left_err; 3850 v_budget += needed; 3851 v_left -= needed; 3852 3853 /* total used for non-traffic vectors */ 3854 v_other = v_budget; 3855 3856 /* reserve vectors for LAN traffic */ 3857 needed = num_cpus; 3858 if (v_left < needed) 3859 goto no_hw_vecs_left_err; 3860 pf->num_lan_msix = needed; 3861 v_budget += needed; 3862 v_left -= needed; 3863 3864 /* reserve vectors for RDMA auxiliary driver */ 3865 if (ice_is_rdma_ena(pf)) { 3866 needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; 3867 if (v_left < needed) 3868 goto no_hw_vecs_left_err; 3869 pf->num_rdma_msix = needed; 3870 v_budget += needed; 3871 v_left -= needed; 3872 } 3873 3874 pf->msix_entries = devm_kcalloc(dev, v_budget, 3875 sizeof(*pf->msix_entries), GFP_KERNEL); 3876 if (!pf->msix_entries) { 3877 err = -ENOMEM; 3878 goto exit_err; 3879 } 3880 3881 for (i = 0; i < v_budget; i++) 3882 pf->msix_entries[i].entry = i; 3883 3884 /* actually reserve the vectors */ 3885 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, 3886 ICE_MIN_MSIX, v_budget); 3887 if (v_actual < 0) { 3888 dev_err(dev, "unable to reserve MSI-X vectors\n"); 3889 err = v_actual; 3890 goto msix_err; 3891 } 3892 3893 if (v_actual < v_budget) { 3894 dev_warn(dev, "not enough OS MSI-X vectors. 
requested = %d, obtained = %d\n", 3895 v_budget, v_actual); 3896 3897 if (v_actual < ICE_MIN_MSIX) { 3898 /* error if we can't get minimum vectors */ 3899 pci_disable_msix(pf->pdev); 3900 err = -ERANGE; 3901 goto msix_err; 3902 } else { 3903 int v_remain = v_actual - v_other; 3904 int v_rdma = 0, v_min_rdma = 0; 3905 3906 if (ice_is_rdma_ena(pf)) { 3907 /* Need at least 1 interrupt in addition to 3908 * AEQ MSIX 3909 */ 3910 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; 3911 v_min_rdma = ICE_MIN_RDMA_MSIX; 3912 } 3913 3914 if (v_actual == ICE_MIN_MSIX || 3915 v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { 3916 dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); 3917 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3918 3919 pf->num_rdma_msix = 0; 3920 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; 3921 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || 3922 (v_remain - v_rdma < v_rdma)) { 3923 /* Support minimum RDMA and give remaining 3924 * vectors to LAN MSIX 3925 */ 3926 pf->num_rdma_msix = v_min_rdma; 3927 pf->num_lan_msix = v_remain - v_min_rdma; 3928 } else { 3929 /* Split remaining MSIX with RDMA after 3930 * accounting for AEQ MSIX 3931 */ 3932 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + 3933 ICE_RDMA_NUM_AEQ_MSIX; 3934 pf->num_lan_msix = v_remain - pf->num_rdma_msix; 3935 } 3936 3937 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", 3938 pf->num_lan_msix); 3939 3940 if (ice_is_rdma_ena(pf)) 3941 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", 3942 pf->num_rdma_msix); 3943 } 3944 } 3945 3946 return v_actual; 3947 3948 msix_err: 3949 devm_kfree(dev, pf->msix_entries); 3950 goto exit_err; 3951 3952 no_hw_vecs_left_err: 3953 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", 3954 needed, v_left); 3955 err = -ERANGE; 3956 exit_err: 3957 pf->num_rdma_msix = 0; 3958 pf->num_lan_msix = 0; 3959 return err; 3960 } 3961 3962 /** 3963 * ice_dis_msix - Disable MSI-X interrupt setup in OS 3964 * @pf: board private structure 3965 */ 3966 static void ice_dis_msix(struct ice_pf *pf) 3967 { 3968 pci_disable_msix(pf->pdev); 3969 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); 3970 pf->msix_entries = NULL; 3971 } 3972 3973 /** 3974 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme 3975 * @pf: board private structure 3976 */ 3977 static void ice_clear_interrupt_scheme(struct ice_pf *pf) 3978 { 3979 ice_dis_msix(pf); 3980 3981 if (pf->irq_tracker) { 3982 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); 3983 pf->irq_tracker = NULL; 3984 } 3985 } 3986 3987 /** 3988 * ice_init_interrupt_scheme - Determine proper interrupt scheme 3989 * @pf: board private structure to initialize 3990 */ 3991 static int ice_init_interrupt_scheme(struct ice_pf *pf) 3992 { 3993 int vectors; 3994 3995 vectors = ice_ena_msix_range(pf); 3996 3997 if (vectors < 0) 3998 return vectors; 3999 4000 /* set up vector assignment tracking */ 4001 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), 4002 struct_size(pf->irq_tracker, list, vectors), 4003 GFP_KERNEL); 4004 if (!pf->irq_tracker) { 4005 ice_dis_msix(pf); 4006 return -ENOMEM; 4007 } 4008 4009 /* populate SW interrupts pool with number of OS granted IRQs. 
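	 * An illustrative later consumer (a sketch, not a call made at this
	 * point) would reserve entries from the tracker via the driver's
	 * ice_get_res() helper, e.g.:
	 *
	 *	base = ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
	 *			   vsi->idx);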
*/ 4010 pf->num_avail_sw_msix = (u16)vectors; 4011 pf->irq_tracker->num_entries = (u16)vectors; 4012 pf->irq_tracker->end = pf->irq_tracker->num_entries; 4013 4014 return 0; 4015 } 4016 4017 /** 4018 * ice_is_wol_supported - check if WoL is supported 4019 * @hw: pointer to hardware info 4020 * 4021 * Check if WoL is supported based on the HW configuration. 4022 * Returns true if NVM supports and enables WoL for this port, false otherwise 4023 */ 4024 bool ice_is_wol_supported(struct ice_hw *hw) 4025 { 4026 u16 wol_ctrl; 4027 4028 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 4029 * word) indicates WoL is not supported on the corresponding PF ID. 4030 */ 4031 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 4032 return false; 4033 4034 return !(BIT(hw->port_info->lport) & wol_ctrl); 4035 } 4036 4037 /** 4038 * ice_vsi_recfg_qs - Change the number of queues on a VSI 4039 * @vsi: VSI being changed 4040 * @new_rx: new number of Rx queues 4041 * @new_tx: new number of Tx queues 4042 * 4043 * Only change the number of queues if new_tx, or new_rx is non-0. 4044 * 4045 * Returns 0 on success. 4046 */ 4047 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) 4048 { 4049 struct ice_pf *pf = vsi->back; 4050 int err = 0, timeout = 50; 4051 4052 if (!new_rx && !new_tx) 4053 return -EINVAL; 4054 4055 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 4056 timeout--; 4057 if (!timeout) 4058 return -EBUSY; 4059 usleep_range(1000, 2000); 4060 } 4061 4062 if (new_tx) 4063 vsi->req_txq = (u16)new_tx; 4064 if (new_rx) 4065 vsi->req_rxq = (u16)new_rx; 4066 4067 /* set for the next time the netdev is started */ 4068 if (!netif_running(vsi->netdev)) { 4069 ice_vsi_rebuild(vsi, false); 4070 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 4071 goto done; 4072 } 4073 4074 ice_vsi_close(vsi); 4075 ice_vsi_rebuild(vsi, false); 4076 ice_pf_dcb_recfg(pf); 4077 ice_vsi_open(vsi); 4078 done: 4079 clear_bit(ICE_CFG_BUSY, pf->state); 4080 return err; 4081 } 4082 4083 /** 4084 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode 4085 * @pf: PF to configure 4086 * 4087 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF 4088 * VSI can still Tx/Rx VLAN tagged packets. 
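 *
 * In sketch form, the update below boils down to a read/modify/write of a
 * local VSI context copy:
 *
 *	ctxt->info = vsi->info;
 *	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 *	ice_update_vsi(hw, vsi->idx, ctxt, NULL);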
4089 */ 4090 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 4091 { 4092 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4093 struct ice_vsi_ctx *ctxt; 4094 struct ice_hw *hw; 4095 int status; 4096 4097 if (!vsi) 4098 return; 4099 4100 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 4101 if (!ctxt) 4102 return; 4103 4104 hw = &pf->hw; 4105 ctxt->info = vsi->info; 4106 4107 ctxt->info.valid_sections = 4108 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 4109 ICE_AQ_VSI_PROP_SECURITY_VALID | 4110 ICE_AQ_VSI_PROP_SW_VALID); 4111 4112 /* disable VLAN anti-spoof */ 4113 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4114 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4115 4116 /* disable VLAN pruning and keep all other settings */ 4117 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4118 4119 /* allow all VLANs on Tx and don't strip on Rx */ 4120 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | 4121 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 4122 4123 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 4124 if (status) { 4125 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", 4126 status, ice_aq_str(hw->adminq.sq_last_status)); 4127 } else { 4128 vsi->info.sec_flags = ctxt->info.sec_flags; 4129 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 4130 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; 4131 } 4132 4133 kfree(ctxt); 4134 } 4135 4136 /** 4137 * ice_log_pkg_init - log result of DDP package load 4138 * @hw: pointer to hardware info 4139 * @state: state of package load 4140 */ 4141 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) 4142 { 4143 struct ice_pf *pf = hw->back; 4144 struct device *dev; 4145 4146 dev = ice_pf_to_dev(pf); 4147 4148 switch (state) { 4149 case ICE_DDP_PKG_SUCCESS: 4150 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 4151 hw->active_pkg_name, 4152 hw->active_pkg_ver.major, 4153 hw->active_pkg_ver.minor, 4154 hw->active_pkg_ver.update, 4155 hw->active_pkg_ver.draft); 4156 break; 4157 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: 4158 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 4159 hw->active_pkg_name, 4160 hw->active_pkg_ver.major, 4161 hw->active_pkg_ver.minor, 4162 hw->active_pkg_ver.update, 4163 hw->active_pkg_ver.draft); 4164 break; 4165 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: 4166 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 4167 hw->active_pkg_name, 4168 hw->active_pkg_ver.major, 4169 hw->active_pkg_ver.minor, 4170 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4171 break; 4172 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: 4173 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 4174 hw->active_pkg_name, 4175 hw->active_pkg_ver.major, 4176 hw->active_pkg_ver.minor, 4177 hw->active_pkg_ver.update, 4178 hw->active_pkg_ver.draft, 4179 hw->pkg_name, 4180 hw->pkg_ver.major, 4181 hw->pkg_ver.minor, 4182 hw->pkg_ver.update, 4183 hw->pkg_ver.draft); 4184 break; 4185 case ICE_DDP_PKG_FW_MISMATCH: 4186 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. 
Entering safe mode.\n"); 4187 break; 4188 case ICE_DDP_PKG_INVALID_FILE: 4189 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); 4190 break; 4191 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: 4192 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 4193 break; 4194 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: 4195 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 4196 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4197 break; 4198 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: 4199 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); 4200 break; 4201 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: 4202 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 4203 break; 4204 case ICE_DDP_PKG_LOAD_ERROR: 4205 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 4206 /* poll for reset to complete */ 4207 if (ice_check_reset(hw)) 4208 dev_err(dev, "Error resetting device. Please reload the driver\n"); 4209 break; 4210 case ICE_DDP_PKG_ERR: 4211 default: 4212 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); 4213 break; 4214 } 4215 } 4216 4217 /** 4218 * ice_load_pkg - load/reload the DDP Package file 4219 * @firmware: firmware structure when firmware requested or NULL for reload 4220 * @pf: pointer to the PF instance 4221 * 4222 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 4223 * initialize HW tables. 4224 */ 4225 static void 4226 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 4227 { 4228 enum ice_ddp_state state = ICE_DDP_PKG_ERR; 4229 struct device *dev = ice_pf_to_dev(pf); 4230 struct ice_hw *hw = &pf->hw; 4231 4232 /* Load DDP Package */ 4233 if (firmware && !hw->pkg_copy) { 4234 state = ice_copy_and_init_pkg(hw, firmware->data, 4235 firmware->size); 4236 ice_log_pkg_init(hw, state); 4237 } else if (!firmware && hw->pkg_copy) { 4238 /* Reload package during rebuild after CORER/GLOBR reset */ 4239 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 4240 ice_log_pkg_init(hw, state); 4241 } else { 4242 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); 4243 } 4244 4245 if (!ice_is_init_pkg_successful(state)) { 4246 /* Safe Mode */ 4247 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4248 return; 4249 } 4250 4251 /* Successful download package is the precondition for advanced 4252 * features, hence setting the ICE_FLAG_ADV_FEATURES flag 4253 */ 4254 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4255 } 4256 4257 /** 4258 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 4259 * @pf: pointer to the PF structure 4260 * 4261 * There is no error returned here because the driver should be able to handle 4262 * 128 Byte cache lines, so we only print a warning in case issues are seen, 4263 * specifically with Tx. 
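 *
 * A hypothetical cross-check against PCI config space (not what this helper
 * does) could read the cache line size register directly:
 *
 *	u8 cls;
 *
 *	pci_read_config_byte(pf->pdev, PCI_CACHE_LINE_SIZE, &cls);
 *	(cls is in units of 32-bit words, so a value of 16 means 64 bytes)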
 */
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
			 ICE_CACHE_LINE_BYTES);
}

/**
 * ice_send_version - update firmware with driver version
 * @pf: PF struct
 *
 * Returns 0 on success, else error code
 */
static int ice_send_version(struct ice_pf *pf)
{
	struct ice_driver_ver dv;

	dv.major_ver = 0xff;
	dv.minor_ver = 0xff;
	dv.build_ver = 0xff;
	dv.subbuild_ver = 0;
	strscpy((char *)dv.driver_string, UTS_RELEASE,
		sizeof(dv.driver_string));
	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}

/**
 * ice_init_fdir - Initialize flow director VSI and configuration
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative on error
 */
static int ice_init_fdir(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *ctrl_vsi;
	int err;

	/* Side Band Flow Director needs to have a control VSI.
	 * Allocate it and store it in the PF.
	 */
	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
	if (!ctrl_vsi) {
		dev_dbg(dev, "could not create control VSI\n");
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "could not open control VSI\n");
		goto err_vsi_open;
	}

	mutex_init(&pf->hw.fdir_fltr_lock);

	err = ice_fdir_create_dflt_rules(pf);
	if (err)
		goto err_fdir_rule;

	return 0;

err_fdir_rule:
	ice_fdir_release_flows(&pf->hw);
	ice_vsi_close(ctrl_vsi);
err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[pf->ctrl_vsi_idx] = NULL;
		pf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_get_opt_fw_name - return optional firmware file name or NULL
 * @pf: pointer to the PF instance
 */
static char *ice_get_opt_fw_name(struct ice_pf *pf)
{
	/* The optional firmware name is the same as the default, with an
	 * additional dash followed by an EUI-64 identifier (PCIe Device
	 * Serial Number)
	 */
	struct pci_dev *pdev = pf->pdev;
	char *opt_fw_filename;
	u64 dsn;

	/* Determine the name of the optional file using the DSN (two
	 * dwords following the start of the DSN Capability).
	 */
	dsn = pci_get_dsn(pdev);
	if (!dsn)
		return NULL;

	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
	if (!opt_fw_filename)
		return NULL;

	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
		 ICE_DDP_PKG_PATH, dsn);

	return opt_fw_filename;
}

/**
 * ice_request_fw - load the DDP package file, preferring a device-specific one
 * @pf: pointer to the PF instance
 */
static void ice_request_fw(struct ice_pf *pf)
{
	char *opt_fw_filename = ice_get_opt_fw_name(pf);
	const struct firmware *firmware = NULL;
	struct device *dev = ice_pf_to_dev(pf);
	int err = 0;

	/* An optional device-specific DDP package (if present) overrides the
	 * default DDP package file. The kernel logs a debug message if the
	 * file doesn't exist, and warning messages for other errors.
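	 *
	 * For example, a device whose DSN is 0x0123456789abcdef would first
	 * look for (path assembled by ice_get_opt_fw_name() above):
	 *
	 *	/lib/firmware/intel/ice/ddp/ice-0123456789abcdef.pkg
	 *
	 * before falling back to the default ICE_DDP_PKG_FILE below.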
4382 */ 4383 if (opt_fw_filename) { 4384 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); 4385 if (err) { 4386 kfree(opt_fw_filename); 4387 goto dflt_pkg_load; 4388 } 4389 4390 /* request for firmware was successful. Download to device */ 4391 ice_load_pkg(firmware, pf); 4392 kfree(opt_fw_filename); 4393 release_firmware(firmware); 4394 return; 4395 } 4396 4397 dflt_pkg_load: 4398 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); 4399 if (err) { 4400 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); 4401 return; 4402 } 4403 4404 /* request for firmware was successful. Download to device */ 4405 ice_load_pkg(firmware, pf); 4406 release_firmware(firmware); 4407 } 4408 4409 /** 4410 * ice_print_wake_reason - show the wake up cause in the log 4411 * @pf: pointer to the PF struct 4412 */ 4413 static void ice_print_wake_reason(struct ice_pf *pf) 4414 { 4415 u32 wus = pf->wakeup_reason; 4416 const char *wake_str; 4417 4418 /* if no wake event, nothing to print */ 4419 if (!wus) 4420 return; 4421 4422 if (wus & PFPM_WUS_LNKC_M) 4423 wake_str = "Link\n"; 4424 else if (wus & PFPM_WUS_MAG_M) 4425 wake_str = "Magic Packet\n"; 4426 else if (wus & PFPM_WUS_MNG_M) 4427 wake_str = "Management\n"; 4428 else if (wus & PFPM_WUS_FW_RST_WK_M) 4429 wake_str = "Firmware Reset\n"; 4430 else 4431 wake_str = "Unknown\n"; 4432 4433 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 4434 } 4435 4436 /** 4437 * ice_register_netdev - register netdev and devlink port 4438 * @pf: pointer to the PF struct 4439 */ 4440 static int ice_register_netdev(struct ice_pf *pf) 4441 { 4442 struct ice_vsi *vsi; 4443 int err = 0; 4444 4445 vsi = ice_get_main_vsi(pf); 4446 if (!vsi || !vsi->netdev) 4447 return -EIO; 4448 4449 err = register_netdev(vsi->netdev); 4450 if (err) 4451 goto err_register_netdev; 4452 4453 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4454 netif_carrier_off(vsi->netdev); 4455 netif_tx_stop_all_queues(vsi->netdev); 4456 err = ice_devlink_create_pf_port(pf); 4457 if (err) 4458 goto err_devlink_create; 4459 4460 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); 4461 4462 return 0; 4463 err_devlink_create: 4464 unregister_netdev(vsi->netdev); 4465 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4466 err_register_netdev: 4467 free_netdev(vsi->netdev); 4468 vsi->netdev = NULL; 4469 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4470 return err; 4471 } 4472 4473 /** 4474 * ice_probe - Device initialization routine 4475 * @pdev: PCI device information struct 4476 * @ent: entry in ice_pci_tbl 4477 * 4478 * Returns 0 on success, negative on failure 4479 */ 4480 static int 4481 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 4482 { 4483 struct device *dev = &pdev->dev; 4484 struct ice_pf *pf; 4485 struct ice_hw *hw; 4486 int i, err; 4487 4488 if (pdev->is_virtfn) { 4489 dev_err(dev, "can't probe a virtual function\n"); 4490 return -EINVAL; 4491 } 4492 4493 /* this driver uses devres, see 4494 * Documentation/driver-api/driver-model/devres.rst 4495 */ 4496 err = pcim_enable_device(pdev); 4497 if (err) 4498 return err; 4499 4500 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 4501 if (err) { 4502 dev_err(dev, "BAR0 I/O map error %d\n", err); 4503 return err; 4504 } 4505 4506 pf = ice_allocate_pf(dev); 4507 if (!pf) 4508 return -ENOMEM; 4509 4510 /* initialize Auxiliary index to invalid value */ 4511 pf->aux_idx = -1; 4512 4513 /* set up for high or low DMA */ 4514 err = 
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4515 if (err) { 4516 dev_err(dev, "DMA configuration failed: 0x%x\n", err); 4517 return err; 4518 } 4519 4520 pci_enable_pcie_error_reporting(pdev); 4521 pci_set_master(pdev); 4522 4523 pf->pdev = pdev; 4524 pci_set_drvdata(pdev, pf); 4525 set_bit(ICE_DOWN, pf->state); 4526 /* Disable service task until DOWN bit is cleared */ 4527 set_bit(ICE_SERVICE_DIS, pf->state); 4528 4529 hw = &pf->hw; 4530 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 4531 pci_save_state(pdev); 4532 4533 hw->back = pf; 4534 hw->vendor_id = pdev->vendor; 4535 hw->device_id = pdev->device; 4536 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 4537 hw->subsystem_vendor_id = pdev->subsystem_vendor; 4538 hw->subsystem_device_id = pdev->subsystem_device; 4539 hw->bus.device = PCI_SLOT(pdev->devfn); 4540 hw->bus.func = PCI_FUNC(pdev->devfn); 4541 ice_set_ctrlq_len(hw); 4542 4543 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 4544 4545 #ifndef CONFIG_DYNAMIC_DEBUG 4546 if (debug < -1) 4547 hw->debug_mask = debug; 4548 #endif 4549 4550 err = ice_init_hw(hw); 4551 if (err) { 4552 dev_err(dev, "ice_init_hw failed: %d\n", err); 4553 err = -EIO; 4554 goto err_exit_unroll; 4555 } 4556 4557 ice_init_feature_support(pf); 4558 4559 ice_request_fw(pf); 4560 4561 /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be 4562 * set in pf->state, which will cause ice_is_safe_mode to return 4563 * true 4564 */ 4565 if (ice_is_safe_mode(pf)) { 4566 /* we already got function/device capabilities but these don't 4567 * reflect what the driver needs to do in safe mode. Instead of 4568 * adding conditional logic everywhere to ignore these 4569 * device/function capabilities, override them. 4570 */ 4571 ice_set_safe_mode_caps(hw); 4572 } 4573 4574 err = ice_init_pf(pf); 4575 if (err) { 4576 dev_err(dev, "ice_init_pf failed: %d\n", err); 4577 goto err_init_pf_unroll; 4578 } 4579 4580 ice_devlink_init_regions(pf); 4581 4582 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; 4583 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; 4584 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; 4585 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; 4586 i = 0; 4587 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 4588 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4589 pf->hw.tnl.valid_count[TNL_VXLAN]; 4590 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4591 UDP_TUNNEL_TYPE_VXLAN; 4592 i++; 4593 } 4594 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { 4595 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4596 pf->hw.tnl.valid_count[TNL_GENEVE]; 4597 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4598 UDP_TUNNEL_TYPE_GENEVE; 4599 i++; 4600 } 4601 4602 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 4603 if (!pf->num_alloc_vsi) { 4604 err = -EIO; 4605 goto err_init_pf_unroll; 4606 } 4607 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 4608 dev_warn(&pf->pdev->dev, 4609 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 4610 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 4611 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 4612 } 4613 4614 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 4615 GFP_KERNEL); 4616 if (!pf->vsi) { 4617 err = -ENOMEM; 4618 goto err_init_pf_unroll; 4619 } 4620 4621 err = ice_init_interrupt_scheme(pf); 4622 if (err) { 4623 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 4624 err = -EIO; 4625 goto err_init_vsi_unroll; 4626 } 4627 4628 /* In case of MSIX we are going to 
setup the misc vector right here 4629 * to handle admin queue events etc. In case of legacy and MSI 4630 * the misc functionality and queue processing is combined in 4631 * the same vector and that gets setup at open. 4632 */ 4633 err = ice_req_irq_msix_misc(pf); 4634 if (err) { 4635 dev_err(dev, "setup of misc vector failed: %d\n", err); 4636 goto err_init_interrupt_unroll; 4637 } 4638 4639 /* create switch struct for the switch element created by FW on boot */ 4640 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); 4641 if (!pf->first_sw) { 4642 err = -ENOMEM; 4643 goto err_msix_misc_unroll; 4644 } 4645 4646 if (hw->evb_veb) 4647 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4648 else 4649 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4650 4651 pf->first_sw->pf = pf; 4652 4653 /* record the sw_id available for later use */ 4654 pf->first_sw->sw_id = hw->port_info->sw_id; 4655 4656 err = ice_setup_pf_sw(pf); 4657 if (err) { 4658 dev_err(dev, "probe failed due to setup PF switch: %d\n", err); 4659 goto err_alloc_sw_unroll; 4660 } 4661 4662 clear_bit(ICE_SERVICE_DIS, pf->state); 4663 4664 /* tell the firmware we are up */ 4665 err = ice_send_version(pf); 4666 if (err) { 4667 dev_err(dev, "probe failed sending driver version %s. error: %d\n", 4668 UTS_RELEASE, err); 4669 goto err_send_version_unroll; 4670 } 4671 4672 /* since everything is good, start the service timer */ 4673 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4674 4675 err = ice_init_link_events(pf->hw.port_info); 4676 if (err) { 4677 dev_err(dev, "ice_init_link_events failed: %d\n", err); 4678 goto err_send_version_unroll; 4679 } 4680 4681 /* not a fatal error if this fails */ 4682 err = ice_init_nvm_phy_type(pf->hw.port_info); 4683 if (err) 4684 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 4685 4686 /* not a fatal error if this fails */ 4687 err = ice_update_link_info(pf->hw.port_info); 4688 if (err) 4689 dev_err(dev, "ice_update_link_info failed: %d\n", err); 4690 4691 ice_init_link_dflt_override(pf->hw.port_info); 4692 4693 ice_check_link_cfg_err(pf, 4694 pf->hw.port_info->phy.link_info.link_cfg_err); 4695 4696 /* if media available, initialize PHY settings */ 4697 if (pf->hw.port_info->phy.link_info.link_info & 4698 ICE_AQ_MEDIA_AVAILABLE) { 4699 /* not a fatal error if this fails */ 4700 err = ice_init_phy_user_cfg(pf->hw.port_info); 4701 if (err) 4702 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 4703 4704 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 4705 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4706 4707 if (vsi) 4708 ice_configure_phy(vsi); 4709 } 4710 } else { 4711 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 4712 } 4713 4714 ice_verify_cacheline_size(pf); 4715 4716 /* Save wakeup reason register for later use */ 4717 pf->wakeup_reason = rd32(hw, PFPM_WUS); 4718 4719 /* check for a power management event */ 4720 ice_print_wake_reason(pf); 4721 4722 /* clear wake status, all bits */ 4723 wr32(hw, PFPM_WUS, U32_MAX); 4724 4725 /* Disable WoL at init, wait for user to enable */ 4726 device_set_wakeup_enable(dev, false); 4727 4728 if (ice_is_safe_mode(pf)) { 4729 ice_set_safe_mode_vlan_cfg(pf); 4730 goto probe_done; 4731 } 4732 4733 /* initialize DDP driven features */ 4734 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4735 ice_ptp_init(pf); 4736 4737 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 4738 ice_gnss_init(pf); 4739 4740 /* Note: Flow director init failure is non-fatal to load */ 4741 if (ice_init_fdir(pf)) 4742 dev_err(dev, "could 
not initialize flow director\n"); 4743 4744 /* Note: DCB init failure is non-fatal to load */ 4745 if (ice_init_pf_dcb(pf, false)) { 4746 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4747 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 4748 } else { 4749 ice_cfg_lldp_mib_change(&pf->hw, true); 4750 } 4751 4752 if (ice_init_lag(pf)) 4753 dev_warn(dev, "Failed to init link aggregation support\n"); 4754 4755 /* print PCI link speed and width */ 4756 pcie_print_link_status(pf->pdev); 4757 4758 probe_done: 4759 err = ice_register_netdev(pf); 4760 if (err) 4761 goto err_netdev_reg; 4762 4763 err = ice_devlink_register_params(pf); 4764 if (err) 4765 goto err_netdev_reg; 4766 4767 /* ready to go, so clear down state bit */ 4768 clear_bit(ICE_DOWN, pf->state); 4769 if (ice_is_rdma_ena(pf)) { 4770 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); 4771 if (pf->aux_idx < 0) { 4772 dev_err(dev, "Failed to allocate device ID for AUX driver\n"); 4773 err = -ENOMEM; 4774 goto err_devlink_reg_param; 4775 } 4776 4777 err = ice_init_rdma(pf); 4778 if (err) { 4779 dev_err(dev, "Failed to initialize RDMA: %d\n", err); 4780 err = -EIO; 4781 goto err_init_aux_unroll; 4782 } 4783 } else { 4784 dev_warn(dev, "RDMA is not supported on this device\n"); 4785 } 4786 4787 ice_devlink_register(pf); 4788 return 0; 4789 4790 err_init_aux_unroll: 4791 pf->adev = NULL; 4792 ida_free(&ice_aux_ida, pf->aux_idx); 4793 err_devlink_reg_param: 4794 ice_devlink_unregister_params(pf); 4795 err_netdev_reg: 4796 err_send_version_unroll: 4797 ice_vsi_release_all(pf); 4798 err_alloc_sw_unroll: 4799 set_bit(ICE_SERVICE_DIS, pf->state); 4800 set_bit(ICE_DOWN, pf->state); 4801 devm_kfree(dev, pf->first_sw); 4802 err_msix_misc_unroll: 4803 ice_free_irq_msix_misc(pf); 4804 err_init_interrupt_unroll: 4805 ice_clear_interrupt_scheme(pf); 4806 err_init_vsi_unroll: 4807 devm_kfree(dev, pf->vsi); 4808 err_init_pf_unroll: 4809 ice_deinit_pf(pf); 4810 ice_devlink_destroy_regions(pf); 4811 ice_deinit_hw(hw); 4812 err_exit_unroll: 4813 pci_disable_pcie_error_reporting(pdev); 4814 pci_disable_device(pdev); 4815 return err; 4816 } 4817 4818 /** 4819 * ice_set_wake - enable or disable Wake on LAN 4820 * @pf: pointer to the PF struct 4821 * 4822 * Simple helper for WoL control 4823 */ 4824 static void ice_set_wake(struct ice_pf *pf) 4825 { 4826 struct ice_hw *hw = &pf->hw; 4827 bool wol = pf->wol_ena; 4828 4829 /* clear wake state, otherwise new wake events won't fire */ 4830 wr32(hw, PFPM_WUS, U32_MAX); 4831 4832 /* enable / disable APM wake up, no RMW needed */ 4833 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 4834 4835 /* set magic packet filter enabled */ 4836 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 4837 } 4838 4839 /** 4840 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 4841 * @pf: pointer to the PF struct 4842 * 4843 * Issue firmware command to enable multicast magic wake, making 4844 * sure that any locally administered address (LAA) is used for 4845 * wake, and that PF reset doesn't undo the LAA. 
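 *
 * In the teardown/suspend paths this runs before the wake registers are
 * finalized; the ice_suspend() ordering, in sketch form, is:
 *
 *	ice_setup_mc_magic_wake(pf);
 *	ice_prepare_for_shutdown(pf);
 *	ice_set_wake(pf);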
4846 */ 4847 static void ice_setup_mc_magic_wake(struct ice_pf *pf) 4848 { 4849 struct device *dev = ice_pf_to_dev(pf); 4850 struct ice_hw *hw = &pf->hw; 4851 u8 mac_addr[ETH_ALEN]; 4852 struct ice_vsi *vsi; 4853 int status; 4854 u8 flags; 4855 4856 if (!pf->wol_ena) 4857 return; 4858 4859 vsi = ice_get_main_vsi(pf); 4860 if (!vsi) 4861 return; 4862 4863 /* Get current MAC address in case it's an LAA */ 4864 if (vsi->netdev) 4865 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 4866 else 4867 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 4868 4869 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 4870 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 4871 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 4872 4873 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 4874 if (status) 4875 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", 4876 status, ice_aq_str(hw->adminq.sq_last_status)); 4877 } 4878 4879 /** 4880 * ice_remove - Device removal routine 4881 * @pdev: PCI device information struct 4882 */ 4883 static void ice_remove(struct pci_dev *pdev) 4884 { 4885 struct ice_pf *pf = pci_get_drvdata(pdev); 4886 int i; 4887 4888 ice_devlink_unregister(pf); 4889 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 4890 if (!ice_is_reset_in_progress(pf->state)) 4891 break; 4892 msleep(100); 4893 } 4894 4895 ice_tc_indir_block_remove(pf); 4896 4897 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { 4898 set_bit(ICE_VF_RESETS_DISABLED, pf->state); 4899 ice_free_vfs(pf); 4900 } 4901 4902 ice_service_task_stop(pf); 4903 4904 ice_aq_cancel_waiting_tasks(pf); 4905 ice_unplug_aux_dev(pf); 4906 if (pf->aux_idx >= 0) 4907 ida_free(&ice_aux_ida, pf->aux_idx); 4908 ice_devlink_unregister_params(pf); 4909 set_bit(ICE_DOWN, pf->state); 4910 4911 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); 4912 ice_deinit_lag(pf); 4913 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4914 ice_ptp_release(pf); 4915 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 4916 ice_gnss_exit(pf); 4917 if (!ice_is_safe_mode(pf)) 4918 ice_remove_arfs(pf); 4919 ice_setup_mc_magic_wake(pf); 4920 ice_vsi_release_all(pf); 4921 ice_set_wake(pf); 4922 ice_free_irq_msix_misc(pf); 4923 ice_for_each_vsi(pf, i) { 4924 if (!pf->vsi[i]) 4925 continue; 4926 ice_vsi_free_q_vectors(pf->vsi[i]); 4927 } 4928 ice_deinit_pf(pf); 4929 ice_devlink_destroy_regions(pf); 4930 ice_deinit_hw(&pf->hw); 4931 4932 /* Issue a PFR as part of the prescribed driver unload flow. Do not 4933 * do it via ice_schedule_reset() since there is no need to rebuild 4934 * and the service task is already stopped. 
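	 * In other words, the synchronous form is used:
	 *
	 *	ice_reset(&pf->hw, ICE_RESET_PFR);
	 *
	 * rather than the asynchronous ice_schedule_reset(pf, ICE_RESET_PFR).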
	 */
	ice_reset(&pf->hw, ICE_RESET_PFR);
	pci_wait_for_pending_transaction(pdev);
	ice_clear_interrupt_scheme(pf);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * ice_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 */
static void ice_shutdown(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	ice_remove(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_ena);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
/**
 * ice_prepare_for_shutdown - prep for PCI shutdown
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for PCI device shutdown
 */
static void ice_prepare_for_shutdown(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 v;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			pf->vsi[v]->vsi_num = 0;

	ice_shutdown_all_ctrlq(hw);
}

/**
 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
 * @pf: board private structure to reinitialize
 *
 * This routine reinitializes the interrupt scheme that was cleared during
 * the power management suspend callback.
 *
 * This should be called during the resume routine to re-allocate the
 * q_vectors and reacquire interrupts.
 */
static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret, v;

	/* Since we clear the MSIX flag during suspend, we need to
	 * set it back during resume...
	 */

	ret = ice_init_interrupt_scheme(pf);
	if (ret) {
		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
		return ret;
	}

	/* Remap vectors and rings, after successful re-init interrupts */
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;

		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
		if (ret)
			goto err_reinit;
		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
	}

	ret = ice_req_irq_msix_misc(pf);
	if (ret) {
		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
			ret);
		goto err_reinit;
	}

	return 0;

err_reinit:
	while (v--)
		if (pf->vsi[v])
			ice_vsi_free_q_vectors(pf->vsi[v]);

	return ret;
}

/**
 * ice_suspend - PM callback to quiesce the device and prepare for D3 transition
 * @dev: generic device information structure
 */
static int __maybe_unused ice_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ice_pf *pf;
	int disabled, v;

	pf = pci_get_drvdata(pdev);

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Device is not ready, no need to suspend it\n");
		return -EBUSY;
	}

	/* Stop watchdog tasks until resume completion.
	 * Even though it is most likely that the service task is
	 * disabled if the device is suspended or down, the service task's
	 * state is controlled by a different state bit, and we should
	 * store and honor whatever state that bit is in at this point.
	 */
	disabled = ice_service_task_stop(pf);

	ice_unplug_aux_dev(pf);

	/* Already suspended? Then there is nothing to do */
	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	if (test_bit(ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "can't suspend device in reset or already down\n");
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	ice_setup_mc_magic_wake(pf);

	ice_prepare_for_shutdown(pf);

	ice_set_wake(pf);

	/* Free vectors, clear the interrupt scheme and release IRQs
	 * for proper hibernation, especially with a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[v]);
	}
	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
	ice_clear_interrupt_scheme(pf);

	pci_save_state(pdev);
	pci_wake_from_d3(pdev, pf->wol_ena);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

/**
 * ice_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 */
static int __maybe_unused ice_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	enum ice_reset_req reset_type;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;

	ret = pci_enable_device_mem(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable device after suspend\n");
		return ret;
	}

	pf = pci_get_drvdata(pdev);
	hw = &pf->hw;

	pf->wakeup_reason = rd32(hw, PFPM_WUS);
	ice_print_wake_reason(pf);

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	ret = ice_reinit_interrupt_scheme(pf);
	if (ret)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);

	clear_bit(ICE_DOWN, pf->state);
	/* Now perform PF reset and rebuild */
	reset_type = ICE_RESET_PFR;
	/* re-enable service task for reset, but allow reset to schedule it */
	clear_bit(ICE_SERVICE_DIS, pf->state);

	if (ice_schedule_reset(pf, reset_type))
		dev_err(dev, "Reset during resume failed.\n");

	clear_bit(ICE_SUSPENDED, pf->state);
	ice_service_task_restart(pf);

	/* Restart the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;
}
#endif /* CONFIG_PM */

/**
 * ice_pci_err_detected - warning that PCI error has been detected
 * @pdev: PCI device information struct
 * @err: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and the error handling
 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
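 *
 * Returning PCI_ERS_RESULT_NEED_RESET asks the PCI core to proceed to the
 * .slot_reset() stage. The callbacks are wired together near the bottom of
 * this file via struct pci_error_handlers, in sketch form:
 *
 *	.error_detected	= ice_pci_err_detected,
 *	.slot_reset	= ice_pci_err_slot_reset,
 *	.resume		= ice_pci_err_resume,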
5175 */ 5176 static pci_ers_result_t 5177 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) 5178 { 5179 struct ice_pf *pf = pci_get_drvdata(pdev); 5180 5181 if (!pf) { 5182 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 5183 __func__, err); 5184 return PCI_ERS_RESULT_DISCONNECT; 5185 } 5186 5187 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5188 ice_service_task_stop(pf); 5189 5190 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5191 set_bit(ICE_PFR_REQ, pf->state); 5192 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5193 } 5194 } 5195 5196 return PCI_ERS_RESULT_NEED_RESET; 5197 } 5198 5199 /** 5200 * ice_pci_err_slot_reset - a PCI slot reset has just happened 5201 * @pdev: PCI device information struct 5202 * 5203 * Called to determine if the driver can recover from the PCI slot reset by 5204 * using a register read to determine if the device is recoverable. 5205 */ 5206 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 5207 { 5208 struct ice_pf *pf = pci_get_drvdata(pdev); 5209 pci_ers_result_t result; 5210 int err; 5211 u32 reg; 5212 5213 err = pci_enable_device_mem(pdev); 5214 if (err) { 5215 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 5216 err); 5217 result = PCI_ERS_RESULT_DISCONNECT; 5218 } else { 5219 pci_set_master(pdev); 5220 pci_restore_state(pdev); 5221 pci_save_state(pdev); 5222 pci_wake_from_d3(pdev, false); 5223 5224 /* Check for life */ 5225 reg = rd32(&pf->hw, GLGEN_RTRIG); 5226 if (!reg) 5227 result = PCI_ERS_RESULT_RECOVERED; 5228 else 5229 result = PCI_ERS_RESULT_DISCONNECT; 5230 } 5231 5232 err = pci_aer_clear_nonfatal_status(pdev); 5233 if (err) 5234 dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n", 5235 err); 5236 /* non-fatal, continue */ 5237 5238 return result; 5239 } 5240 5241 /** 5242 * ice_pci_err_resume - restart operations after PCI error recovery 5243 * @pdev: PCI device information struct 5244 * 5245 * Called to allow the driver to bring things back up after PCI error and/or 5246 * reset recovery have finished 5247 */ 5248 static void ice_pci_err_resume(struct pci_dev *pdev) 5249 { 5250 struct ice_pf *pf = pci_get_drvdata(pdev); 5251 5252 if (!pf) { 5253 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 5254 __func__); 5255 return; 5256 } 5257 5258 if (test_bit(ICE_SUSPENDED, pf->state)) { 5259 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 5260 __func__); 5261 return; 5262 } 5263 5264 ice_restore_all_vfs_msi_state(pdev); 5265 5266 ice_do_reset(pf, ICE_RESET_PFR); 5267 ice_service_task_restart(pf); 5268 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5269 } 5270 5271 /** 5272 * ice_pci_err_reset_prepare - prepare device driver for PCI reset 5273 * @pdev: PCI device information struct 5274 */ 5275 static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 5276 { 5277 struct ice_pf *pf = pci_get_drvdata(pdev); 5278 5279 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5280 ice_service_task_stop(pf); 5281 5282 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5283 set_bit(ICE_PFR_REQ, pf->state); 5284 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5285 } 5286 } 5287 } 5288 5289 /** 5290 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 5291 * @pdev: PCI device information struct 5292 */ 5293 static void ice_pci_err_reset_done(struct pci_dev *pdev) 5294 { 5295 ice_pci_err_resume(pdev); 5296 } 5297 5298 /* ice_pci_tbl - PCI Device ID Table 5299 * 5300 * Wildcard entries (PCI_ANY_ID) should come last 
5301 * Last entry must be all 0s 5302 * 5303 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 5304 * Class, Class Mask, private data (not used) } 5305 */ 5306 static const struct pci_device_id ice_pci_tbl[] = { 5307 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 5308 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 5309 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 5310 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 }, 5311 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 }, 5312 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, 5313 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, 5314 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, 5315 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, 5316 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, 5317 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, 5318 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, 5319 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, 5320 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, 5321 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, 5322 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, 5323 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, 5324 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, 5325 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, 5326 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, 5327 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, 5328 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, 5329 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, 5330 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, 5331 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, 5332 /* required last entry */ 5333 { 0, } 5334 }; 5335 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 5336 5337 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); 5338 5339 static const struct pci_error_handlers ice_pci_err_handler = { 5340 .error_detected = ice_pci_err_detected, 5341 .slot_reset = ice_pci_err_slot_reset, 5342 .reset_prepare = ice_pci_err_reset_prepare, 5343 .reset_done = ice_pci_err_reset_done, 5344 .resume = ice_pci_err_resume 5345 }; 5346 5347 static struct pci_driver ice_driver = { 5348 .name = KBUILD_MODNAME, 5349 .id_table = ice_pci_tbl, 5350 .probe = ice_probe, 5351 .remove = ice_remove, 5352 #ifdef CONFIG_PM 5353 .driver.pm = &ice_pm_ops, 5354 #endif /* CONFIG_PM */ 5355 .shutdown = ice_shutdown, 5356 .sriov_configure = ice_sriov_configure, 5357 .err_handler = &ice_pci_err_handler 5358 }; 5359 5360 /** 5361 * ice_module_init - Driver registration routine 5362 * 5363 * ice_module_init is the first routine called when the driver is 5364 * loaded. All it does is register with the PCI subsystem. 5365 */ 5366 static int __init ice_module_init(void) 5367 { 5368 int status; 5369 5370 pr_info("%s\n", ice_driver_string); 5371 pr_info("%s\n", ice_copyright); 5372 5373 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 5374 if (!ice_wq) { 5375 pr_err("Failed to create workqueue\n"); 5376 return -ENOMEM; 5377 } 5378 5379 status = pci_register_driver(&ice_driver); 5380 if (status) { 5381 pr_err("failed to register PCI driver, err %d\n", status); 5382 destroy_workqueue(ice_wq); 5383 } 5384 5385 return status; 5386 } 5387 module_init(ice_module_init); 5388 5389 /** 5390 * ice_module_exit - Driver exit cleanup routine 5391 * 5392 * ice_module_exit is called just before the driver is removed 5393 * from memory. 
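 *
 * The order below mirrors ice_module_init() in reverse: the PCI driver is
 * unregistered first, so no ice_remove() can still be queuing work when
 * ice_wq is destroyed. In sketch form:
 *
 *	pci_unregister_driver(&ice_driver);
 *	destroy_workqueue(ice_wq);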
5394 */ 5395 static void __exit ice_module_exit(void) 5396 { 5397 pci_unregister_driver(&ice_driver); 5398 destroy_workqueue(ice_wq); 5399 pr_info("module unloaded\n"); 5400 } 5401 module_exit(ice_module_exit); 5402 5403 /** 5404 * ice_set_mac_address - NDO callback to set MAC address 5405 * @netdev: network interface device structure 5406 * @pi: pointer to an address structure 5407 * 5408 * Returns 0 on success, negative on failure 5409 */ 5410 static int ice_set_mac_address(struct net_device *netdev, void *pi) 5411 { 5412 struct ice_netdev_priv *np = netdev_priv(netdev); 5413 struct ice_vsi *vsi = np->vsi; 5414 struct ice_pf *pf = vsi->back; 5415 struct ice_hw *hw = &pf->hw; 5416 struct sockaddr *addr = pi; 5417 u8 old_mac[ETH_ALEN]; 5418 u8 flags = 0; 5419 u8 *mac; 5420 int err; 5421 5422 mac = (u8 *)addr->sa_data; 5423 5424 if (!is_valid_ether_addr(mac)) 5425 return -EADDRNOTAVAIL; 5426 5427 if (ether_addr_equal(netdev->dev_addr, mac)) { 5428 netdev_dbg(netdev, "already using mac %pM\n", mac); 5429 return 0; 5430 } 5431 5432 if (test_bit(ICE_DOWN, pf->state) || 5433 ice_is_reset_in_progress(pf->state)) { 5434 netdev_err(netdev, "can't set mac %pM. device not ready\n", 5435 mac); 5436 return -EBUSY; 5437 } 5438 5439 if (ice_chnl_dmac_fltr_cnt(pf)) { 5440 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", 5441 mac); 5442 return -EAGAIN; 5443 } 5444 5445 netif_addr_lock_bh(netdev); 5446 ether_addr_copy(old_mac, netdev->dev_addr); 5447 /* change the netdev's MAC address */ 5448 eth_hw_addr_set(netdev, mac); 5449 netif_addr_unlock_bh(netdev); 5450 5451 /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 5452 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); 5453 if (err && err != -ENOENT) { 5454 err = -EADDRNOTAVAIL; 5455 goto err_update_filters; 5456 } 5457 5458 /* Add filter for new MAC. If filter exists, return success */ 5459 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 5460 if (err == -EEXIST) 5461 /* Although this MAC filter is already present in hardware it's 5462 * possible in some cases (e.g. bonding) that dev_addr was 5463 * modified outside of the driver and needs to be restored back 5464 * to this value. 5465 */ 5466 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 5467 else if (err) 5468 /* error if the new filter addition failed */ 5469 err = -EADDRNOTAVAIL; 5470 5471 err_update_filters: 5472 if (err) { 5473 netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 5474 mac); 5475 netif_addr_lock_bh(netdev); 5476 eth_hw_addr_set(netdev, old_mac); 5477 netif_addr_unlock_bh(netdev); 5478 return err; 5479 } 5480 5481 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 5482 netdev->dev_addr); 5483 5484 /* write new MAC address to the firmware */ 5485 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 5486 err = ice_aq_manage_mac_write(hw, mac, flags, NULL); 5487 if (err) { 5488 netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %d\n", 5489 mac, err); 5490 } 5491 return 0; 5492 } 5493 5494 /** 5495 * ice_set_rx_mode - NDO callback to set the netdev filters 5496 * @netdev: network interface device structure 5497 */ 5498 static void ice_set_rx_mode(struct net_device *netdev) 5499 { 5500 struct ice_netdev_priv *np = netdev_priv(netdev); 5501 struct ice_vsi *vsi = np->vsi; 5502 5503 if (!vsi) 5504 return; 5505 5506 /* Set the flags to synchronize filters 5507 * ndo_set_rx_mode may be triggered even without a change in netdev 5508 * flags 5509 */ 5510 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 5511 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 5512 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 5513 5514 /* schedule our worker thread which will take care of 5515 * applying the new filter changes 5516 */ 5517 ice_service_task_schedule(vsi->back); 5518 } 5519 5520 /** 5521 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 5522 * @netdev: network interface device structure 5523 * @queue_index: Queue ID 5524 * @maxrate: maximum bandwidth in Mbps 5525 */ 5526 static int 5527 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 5528 { 5529 struct ice_netdev_priv *np = netdev_priv(netdev); 5530 struct ice_vsi *vsi = np->vsi; 5531 u16 q_handle; 5532 int status; 5533 u8 tc; 5534 5535 /* Validate maxrate requested is within permitted range */ 5536 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 5537 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 5538 maxrate, queue_index); 5539 return -EINVAL; 5540 } 5541 5542 q_handle = vsi->tx_rings[queue_index]->q_handle; 5543 tc = ice_dcb_get_tc(vsi, queue_index); 5544 5545 /* Set BW back to default, when user set maxrate to 0 */ 5546 if (!maxrate) 5547 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 5548 q_handle, ICE_MAX_BW); 5549 else 5550 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 5551 q_handle, ICE_MAX_BW, maxrate * 1000); 5552 if (status) 5553 netdev_err(netdev, "Unable to set Tx max rate, error %d\n", 5554 status); 5555 5556 return status; 5557 } 5558 5559 /** 5560 * ice_fdb_add - add an entry to the hardware database 5561 * @ndm: the input from the stack 5562 * @tb: pointer to array of nladdr (unused) 5563 * @dev: the net device pointer 5564 * @addr: the MAC address entry being added 5565 * @vid: VLAN ID 5566 * @flags: instructions from stack about fdb operation 5567 * @extack: netlink extended ack 5568 */ 5569 static int 5570 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 5571 struct net_device *dev, const unsigned char *addr, u16 vid, 5572 u16 flags, struct netlink_ext_ack __always_unused *extack) 5573 { 5574 int err; 5575 5576 if (vid) { 5577 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 5578 return -EINVAL; 5579 } 5580 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 5581 netdev_err(dev, "FDB only supports static addresses\n"); 5582 return -EINVAL; 5583 } 5584 5585 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 5586 err = dev_uc_add_excl(dev, addr); 5587 else if (is_multicast_ether_addr(addr)) 5588 err = dev_mc_add_excl(dev, addr); 5589 else 5590 err = -EINVAL; 5591 5592 /* Only return duplicate errors if NLM_F_EXCL is set */ 5593 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 5594 err = 0; 5595 5596 return err; 5597 } 5598 5599 /** 5600 * ice_fdb_del - delete an entry from the hardware database 5601 * @ndm: the input from the stack 5602 * @tb: pointer to array of 
nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being removed
 * @vid: VLAN ID
 */
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
	    struct net_device *dev, const unsigned char *addr,
	    __always_unused u16 vid)
{
	int err;

	if (ndm->ndm_state & NUD_PERMANENT) {
		netdev_err(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);
	else
		err = -EINVAL;

	return err;
}

#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_CTAG_TX | \
					 NETIF_F_HW_VLAN_STAG_RX | \
					 NETIF_F_HW_VLAN_STAG_TX)

#define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
					 NETIF_F_HW_VLAN_STAG_FILTER)

/**
 * ice_fix_features - fix the netdev features flags based on device limitations
 * @netdev: ptr to the netdev that flags are being fixed on
 * @features: features that need to be checked and possibly fixed
 *
 * Make sure any fixups are made to features in this callback. This enables the
 * driver to not have to check unsupported configurations throughout the driver
 * because that's the responsibility of this callback.
 *
 * Single VLAN Mode (SVM) Supported Features:
 *	NETIF_F_HW_VLAN_CTAG_FILTER
 *	NETIF_F_HW_VLAN_CTAG_RX
 *	NETIF_F_HW_VLAN_CTAG_TX
 *
 * Double VLAN Mode (DVM) Supported Features:
 *	NETIF_F_HW_VLAN_CTAG_FILTER
 *	NETIF_F_HW_VLAN_CTAG_RX
 *	NETIF_F_HW_VLAN_CTAG_TX
 *
 *	NETIF_F_HW_VLAN_STAG_FILTER
 *	NETIF_F_HW_VLAN_STAG_RX
 *	NETIF_F_HW_VLAN_STAG_TX
 *
 * Features that need fixing:
 *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
 *	These are mutually exclusive as the VSI context cannot support multiple
 *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
 *	is not done, then default to clearing the requested STAG offload
 *	settings.
 *
 *	All supported filtering has to be enabled or disabled together. For
 *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
 *	together. If this is not done, then default to VLAN filtering disabled.
 *	These are mutually exclusive as there is currently no way to
 *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
 *	prune rules.
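 *
 * For example (sketch), a request that turns on STAG stripping while CTAG
 * stripping is also requested ends up fixed below as:
 *
 *	features &= ~(NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX);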
5673 */ 5674 static netdev_features_t 5675 ice_fix_features(struct net_device *netdev, netdev_features_t features) 5676 { 5677 struct ice_netdev_priv *np = netdev_priv(netdev); 5678 netdev_features_t supported_vlan_filtering; 5679 netdev_features_t requested_vlan_filtering; 5680 struct ice_vsi *vsi = np->vsi; 5681 5682 requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES; 5683 5684 /* make sure supported_vlan_filtering works for both SVM and DVM */ 5685 supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER; 5686 if (ice_is_dvm_ena(&vsi->back->hw)) 5687 supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER; 5688 5689 if (requested_vlan_filtering && 5690 requested_vlan_filtering != supported_vlan_filtering) { 5691 if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) { 5692 netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n"); 5693 features |= supported_vlan_filtering; 5694 } else { 5695 netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n"); 5696 features &= ~supported_vlan_filtering; 5697 } 5698 } 5699 5700 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && 5701 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { 5702 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); 5703 features &= ~(NETIF_F_HW_VLAN_STAG_RX | 5704 NETIF_F_HW_VLAN_STAG_TX); 5705 } 5706 5707 return features; 5708 } 5709 5710 /** 5711 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI 5712 * @vsi: PF's VSI 5713 * @features: features used to determine VLAN offload settings 5714 * 5715 * First, determine the vlan_ethertype based on the VLAN offload bits in 5716 * features. Then determine if stripping and insertion should be enabled or 5717 * disabled. Finally enable or disable VLAN stripping and insertion. 5718 */ 5719 static int 5720 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) 5721 { 5722 bool enable_stripping = true, enable_insertion = true; 5723 struct ice_vsi_vlan_ops *vlan_ops; 5724 int strip_err = 0, insert_err = 0; 5725 u16 vlan_ethertype = 0; 5726 5727 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 5728 5729 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 5730 vlan_ethertype = ETH_P_8021AD; 5731 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 5732 vlan_ethertype = ETH_P_8021Q; 5733 5734 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) 5735 enable_stripping = false; 5736 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) 5737 enable_insertion = false; 5738 5739 if (enable_stripping) 5740 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); 5741 else 5742 strip_err = vlan_ops->dis_stripping(vsi); 5743 5744 if (enable_insertion) 5745 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); 5746 else 5747 insert_err = vlan_ops->dis_insertion(vsi); 5748 5749 if (strip_err || insert_err) 5750 return -EIO; 5751 5752 return 0; 5753 } 5754 5755 /** 5756 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI 5757 * @vsi: PF's VSI 5758 * @features: features used to determine VLAN filtering settings 5759 * 5760 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the 5761 * features. 
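 *
 * (Illustrative example, an addition: because ice_fix_features() keeps the
 *  CTAG and STAG filter bits in lockstep in DVM,
 *	ethtool -K <pf-netdev> rx-vlan-filter off
 *  clears both bits, and this function then lands in dis_rx_filtering().)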
5762 */ 5763 static int 5764 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) 5765 { 5766 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 5767 int err = 0; 5768 5769 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking 5770 * if either bit is set 5771 */ 5772 if (features & 5773 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) 5774 err = vlan_ops->ena_rx_filtering(vsi); 5775 else 5776 err = vlan_ops->dis_rx_filtering(vsi); 5777 5778 return err; 5779 } 5780 5781 /** 5782 * ice_set_vlan_features - set VLAN settings based on suggested feature set 5783 * @netdev: ptr to the netdev being adjusted 5784 * @features: the feature set that the stack is suggesting 5785 * 5786 * Only update VLAN settings if the requested_vlan_features are different than 5787 * the current_vlan_features. 5788 */ 5789 static int 5790 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) 5791 { 5792 netdev_features_t current_vlan_features, requested_vlan_features; 5793 struct ice_netdev_priv *np = netdev_priv(netdev); 5794 struct ice_vsi *vsi = np->vsi; 5795 int err; 5796 5797 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; 5798 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; 5799 if (current_vlan_features ^ requested_vlan_features) { 5800 err = ice_set_vlan_offload_features(vsi, features); 5801 if (err) 5802 return err; 5803 } 5804 5805 current_vlan_features = netdev->features & 5806 NETIF_VLAN_FILTERING_FEATURES; 5807 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES; 5808 if (current_vlan_features ^ requested_vlan_features) { 5809 err = ice_set_vlan_filtering_features(vsi, features); 5810 if (err) 5811 return err; 5812 } 5813 5814 return 0; 5815 } 5816 5817 /** 5818 * ice_set_features - set the netdev feature flags 5819 * @netdev: ptr to the netdev being adjusted 5820 * @features: the feature set that the stack is suggesting 5821 */ 5822 static int 5823 ice_set_features(struct net_device *netdev, netdev_features_t features) 5824 { 5825 struct ice_netdev_priv *np = netdev_priv(netdev); 5826 struct ice_vsi *vsi = np->vsi; 5827 struct ice_pf *pf = vsi->back; 5828 int ret = 0; 5829 5830 /* Don't set any netdev advanced features with device in Safe Mode */ 5831 if (ice_is_safe_mode(vsi->back)) { 5832 dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); 5833 return ret; 5834 } 5835 5836 /* Do not change setting during reset */ 5837 if (ice_is_reset_in_progress(pf->state)) { 5838 dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 5839 return -EBUSY; 5840 } 5841 5842 /* Multiple features can be changed in one call so keep features in 5843 * separate if/else statements to guarantee each feature is checked 5844 */ 5845 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 5846 ice_vsi_manage_rss_lut(vsi, true); 5847 else if (!(features & NETIF_F_RXHASH) && 5848 netdev->features & NETIF_F_RXHASH) 5849 ice_vsi_manage_rss_lut(vsi, false); 5850 5851 ret = ice_set_vlan_features(netdev, features); 5852 if (ret) 5853 return ret; 5854 5855 if ((features & NETIF_F_NTUPLE) && 5856 !(netdev->features & NETIF_F_NTUPLE)) { 5857 ice_vsi_manage_fdir(vsi, true); 5858 ice_init_arfs(vsi); 5859 } else if (!(features & NETIF_F_NTUPLE) && 5860 (netdev->features & NETIF_F_NTUPLE)) { 5861 ice_vsi_manage_fdir(vsi, false); 5862 ice_clear_arfs(vsi); 5863 
}
5864 
5865 	/* don't turn off hw_tc_offload when ADQ is already enabled */
5866 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
5867 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
5868 		return -EACCES;
5869 	}
5870 
5871 	if ((features & NETIF_F_HW_TC) &&
5872 	    !(netdev->features & NETIF_F_HW_TC))
5873 		set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
5874 	else
5875 		clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
5876 
5877 	return 0;
5878 }
5879 
5880 /**
5881 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
5882 * @vsi: VSI to setup VLAN properties for
5883 */
5884 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5885 {
5886 	int err;
5887 
5888 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
5889 	if (err)
5890 		return err;
5891 
5892 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
5893 	if (err)
5894 		return err;
5895 
5896 	return ice_vsi_add_vlan_zero(vsi);
5897 }
5898 
5899 /**
5900 * ice_vsi_cfg - Setup the VSI
5901 * @vsi: the VSI being configured
5902 *
5903 * Return 0 on success and negative value on error
5904 */
5905 int ice_vsi_cfg(struct ice_vsi *vsi)
5906 {
5907 	int err;
5908 
5909 	if (vsi->netdev) {
5910 		ice_set_rx_mode(vsi->netdev);
5911 
5912 		err = ice_vsi_vlan_setup(vsi);
5913 
5914 		if (err)
5915 			return err;
5916 	}
5917 	ice_vsi_cfg_dcb_rings(vsi);
5918 
5919 	err = ice_vsi_cfg_lan_txqs(vsi);
5920 	if (!err && ice_is_xdp_ena_vsi(vsi))
5921 		err = ice_vsi_cfg_xdp_txqs(vsi);
5922 	if (!err)
5923 		err = ice_vsi_cfg_rxqs(vsi);
5924 
5925 	return err;
5926 }
5927 
5928 /* THEORY OF MODERATION:
5929 * The ice driver hardware works differently from the hardware that DIMLIB was
5930 * originally made for. ice hardware doesn't have packet count limits that
5931 * can trigger an interrupt, but it *does* have interrupt rate limit support,
5932 * which is hard-coded to a limit of 250,000 ints/second.
5933 * If not using dynamic moderation, the INTRL value can be modified
5934 * by ethtool rx-usecs-high.
5935 */
5936 struct ice_dim {
5937 	/* the throttle rate for interrupts, basically the worst case delay
5938 	 * before an initial interrupt fires; the value is stored in microseconds.
5939 	 */
5940 	u16 itr;
5941 };
5942 
5943 /* Make a different profile for Rx that doesn't allow quite so aggressive
5944 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
5945 * second).
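 *
 * (Added arithmetic note: an ITR value expressed in microseconds caps the
 *  rate at roughly 1,000,000 / ITR interrupts per second, so 126 us is
 *  ~7,936 ints/s, and the 2 us entries nominally allow 500,000 ints/s
 *  before INTRL clamps them to 250,000.)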
5946 */ 5947 static const struct ice_dim rx_profile[] = { 5948 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 5949 {8}, /* 125,000 ints/s */ 5950 {16}, /* 62,500 ints/s */ 5951 {62}, /* 16,129 ints/s */ 5952 {126} /* 7,936 ints/s */ 5953 }; 5954 5955 /* The transmit profile, which has the same sorts of values 5956 * as the previous struct 5957 */ 5958 static const struct ice_dim tx_profile[] = { 5959 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 5960 {8}, /* 125,000 ints/s */ 5961 {40}, /* 16,125 ints/s */ 5962 {128}, /* 7,812 ints/s */ 5963 {256} /* 3,906 ints/s */ 5964 }; 5965 5966 static void ice_tx_dim_work(struct work_struct *work) 5967 { 5968 struct ice_ring_container *rc; 5969 struct dim *dim; 5970 u16 itr; 5971 5972 dim = container_of(work, struct dim, work); 5973 rc = (struct ice_ring_container *)dim->priv; 5974 5975 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); 5976 5977 /* look up the values in our local table */ 5978 itr = tx_profile[dim->profile_ix].itr; 5979 5980 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); 5981 ice_write_itr(rc, itr); 5982 5983 dim->state = DIM_START_MEASURE; 5984 } 5985 5986 static void ice_rx_dim_work(struct work_struct *work) 5987 { 5988 struct ice_ring_container *rc; 5989 struct dim *dim; 5990 u16 itr; 5991 5992 dim = container_of(work, struct dim, work); 5993 rc = (struct ice_ring_container *)dim->priv; 5994 5995 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); 5996 5997 /* look up the values in our local table */ 5998 itr = rx_profile[dim->profile_ix].itr; 5999 6000 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); 6001 ice_write_itr(rc, itr); 6002 6003 dim->state = DIM_START_MEASURE; 6004 } 6005 6006 #define ICE_DIM_DEFAULT_PROFILE_IX 1 6007 6008 /** 6009 * ice_init_moderation - set up interrupt moderation 6010 * @q_vector: the vector containing rings to be configured 6011 * 6012 * Set up interrupt moderation registers, with the intent to do the right thing 6013 * when called from reset or from probe, and whether or not dynamic moderation 6014 * is enabled or not. Take special care to write all the registers in both 6015 * dynamic moderation mode or not in order to make sure hardware is in a known 6016 * state. 6017 */ 6018 static void ice_init_moderation(struct ice_q_vector *q_vector) 6019 { 6020 struct ice_ring_container *rc; 6021 bool tx_dynamic, rx_dynamic; 6022 6023 rc = &q_vector->tx; 6024 INIT_WORK(&rc->dim.work, ice_tx_dim_work); 6025 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 6026 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 6027 rc->dim.priv = rc; 6028 tx_dynamic = ITR_IS_DYNAMIC(rc); 6029 6030 /* set the initial TX ITR to match the above */ 6031 ice_write_itr(rc, tx_dynamic ? 6032 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); 6033 6034 rc = &q_vector->rx; 6035 INIT_WORK(&rc->dim.work, ice_rx_dim_work); 6036 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 6037 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 6038 rc->dim.priv = rc; 6039 rx_dynamic = ITR_IS_DYNAMIC(rc); 6040 6041 /* set the initial RX ITR to match the above */ 6042 ice_write_itr(rc, rx_dynamic ? 
rx_profile[rc->dim.profile_ix].itr :
6043 		      rc->itr_setting);
6044 
6045 	ice_set_q_vector_intrl(q_vector);
6046 }
6047 
6048 /**
6049 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6050 * @vsi: the VSI being configured
6051 */
6052 static void ice_napi_enable_all(struct ice_vsi *vsi)
6053 {
6054 	int q_idx;
6055 
6056 	if (!vsi->netdev)
6057 		return;
6058 
6059 	ice_for_each_q_vector(vsi, q_idx) {
6060 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6061 
6062 		ice_init_moderation(q_vector);
6063 
6064 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6065 			napi_enable(&q_vector->napi);
6066 	}
6067 }
6068 
6069 /**
6070 * ice_up_complete - Finish the last steps of bringing up a connection
6071 * @vsi: The VSI being configured
6072 *
6073 * Return 0 on success and negative value on error
6074 */
6075 static int ice_up_complete(struct ice_vsi *vsi)
6076 {
6077 	struct ice_pf *pf = vsi->back;
6078 	int err;
6079 
6080 	ice_vsi_cfg_msix(vsi);
6081 
6082 	/* Enable only Rx rings; Tx rings were enabled by the FW when the
6083 	 * Tx queue group list was configured and the context bits were
6084 	 * programmed using ice_vsi_cfg_txqs
6085 	 */
6086 	err = ice_vsi_start_all_rx_rings(vsi);
6087 	if (err)
6088 		return err;
6089 
6090 	clear_bit(ICE_VSI_DOWN, vsi->state);
6091 	ice_napi_enable_all(vsi);
6092 	ice_vsi_ena_irq(vsi);
6093 
6094 	if (vsi->port_info &&
6095 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6096 	    vsi->netdev) {
6097 		ice_print_link_msg(vsi, true);
6098 		netif_tx_start_all_queues(vsi->netdev);
6099 		netif_carrier_on(vsi->netdev);
6100 		if (!ice_is_e810(&pf->hw))
6101 			ice_ptp_link_change(pf, pf->hw.pf_id, true);
6102 	}
6103 
6104 	/* clear this now, and the first stats read will be used as baseline */
6105 	vsi->stat_offsets_loaded = false;
6106 
6107 	ice_service_task_schedule(pf);
6108 
6109 	return 0;
6110 }
6111 
6112 /**
6113 * ice_up - Bring the connection back up after being down
6114 * @vsi: VSI being configured
6115 */
6116 int ice_up(struct ice_vsi *vsi)
6117 {
6118 	int err;
6119 
6120 	err = ice_vsi_cfg(vsi);
6121 	if (!err)
6122 		err = ice_up_complete(vsi);
6123 
6124 	return err;
6125 }
6126 
6127 /**
6128 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6129 * @syncp: pointer to u64_stats_sync
6130 * @stats: stats that pkts and bytes count will be taken from
6131 * @pkts: packets stats counter
6132 * @bytes: bytes stats counter
6133 *
6134 * This function fetches stats from the ring considering the atomic operations
6135 * that need to be performed to read u64 values on 32-bit machines.
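 *
 * (Added note: the begin/retry pair below is a seqcount-style reader loop;
 *  the copies of pkts and bytes are re-read until they are consistent with
 *  any concurrent writer, while on 64-bit builds the helpers reduce to
 *  plain loads.)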
6136 */
6137 static void
6138 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
6139 			     u64 *pkts, u64 *bytes)
6140 {
6141 	unsigned int start;
6142 
6143 	do {
6144 		start = u64_stats_fetch_begin_irq(syncp);
6145 		*pkts = stats.pkts;
6146 		*bytes = stats.bytes;
6147 	} while (u64_stats_fetch_retry_irq(syncp, start));
6148 }
6149 
6150 /**
6151 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6152 * @vsi: the VSI to be updated
6153 * @vsi_stats: the stats struct to be updated
6154 * @rings: rings to work on
6155 * @count: number of rings
6156 */
6157 static void
6158 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6159 			     struct rtnl_link_stats64 *vsi_stats,
6160 			     struct ice_tx_ring **rings, u16 count)
6161 {
6162 	u16 i;
6163 
6164 	for (i = 0; i < count; i++) {
6165 		struct ice_tx_ring *ring;
6166 		u64 pkts = 0, bytes = 0;
6167 
6168 		ring = READ_ONCE(rings[i]);
6169 		if (!ring)
			continue;	/* a NULL ring must be skipped, not dereferenced below */
6170 		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
6171 		vsi_stats->tx_packets += pkts;
6172 		vsi_stats->tx_bytes += bytes;
6173 		vsi->tx_restart += ring->tx_stats.restart_q;
6174 		vsi->tx_busy += ring->tx_stats.tx_busy;
6175 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
6176 	}
6177 }
6178 
6179 /**
6180 * ice_update_vsi_ring_stats - Update VSI stats counters
6181 * @vsi: the VSI to be updated
6182 */
6183 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6184 {
6185 	struct rtnl_link_stats64 *vsi_stats;
6186 	u64 pkts, bytes;
6187 	int i;
6188 
6189 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6190 	if (!vsi_stats)
6191 		return;
6192 
6193 	/* reset non-netdev (extended) stats */
6194 	vsi->tx_restart = 0;
6195 	vsi->tx_busy = 0;
6196 	vsi->tx_linearize = 0;
6197 	vsi->rx_buf_failed = 0;
6198 	vsi->rx_page_failed = 0;
6199 
6200 	rcu_read_lock();
6201 
6202 	/* update Tx rings counters */
6203 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6204 				     vsi->num_txq);
6205 
6206 	/* update Rx rings counters */
6207 	ice_for_each_rxq(vsi, i) {
6208 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6209 
6210 		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
6211 		vsi_stats->rx_packets += pkts;
6212 		vsi_stats->rx_bytes += bytes;
6213 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
6214 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
6215 	}
6216 
6217 	/* update XDP Tx rings counters */
6218 	if (ice_is_xdp_ena_vsi(vsi))
6219 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6220 					     vsi->num_xdp_txq);
6221 
6222 	rcu_read_unlock();
6223 
6224 	vsi->net_stats.tx_packets = vsi_stats->tx_packets;
6225 	vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
6226 	vsi->net_stats.rx_packets = vsi_stats->rx_packets;
6227 	vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
6228 
6229 	kfree(vsi_stats);
6230 }
6231 
6232 /**
6233 * ice_update_vsi_stats - Update VSI stats counters
6234 * @vsi: the VSI to be updated
6235 */
6236 void ice_update_vsi_stats(struct ice_vsi *vsi)
6237 {
6238 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6239 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6240 	struct ice_pf *pf = vsi->back;
6241 
6242 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6243 	    test_bit(ICE_CFG_BUSY, pf->state))
6244 		return;
6245 
6246 	/* get stats as recorded by Tx/Rx rings */
6247 	ice_update_vsi_ring_stats(vsi);
6248 
6249 	/* get VSI stats as recorded by the hardware */
6250 	ice_update_eth_stats(vsi);
6251 
6252 	cur_ns->tx_errors = cur_es->tx_errors;
6253 	cur_ns->rx_dropped = cur_es->rx_discards;
6254 	cur_ns->tx_dropped = cur_es->tx_discards;
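	/* the remaining fields below come from the VSI-level hardware
	 * counters (ice_eth_stats), not from the ring sums above
	 */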
6255 cur_ns->multicast = cur_es->rx_multicast; 6256 6257 /* update some more netdev stats if this is main VSI */ 6258 if (vsi->type == ICE_VSI_PF) { 6259 cur_ns->rx_crc_errors = pf->stats.crc_errors; 6260 cur_ns->rx_errors = pf->stats.crc_errors + 6261 pf->stats.illegal_bytes + 6262 pf->stats.rx_len_errors + 6263 pf->stats.rx_undersize + 6264 pf->hw_csum_rx_error + 6265 pf->stats.rx_jabber + 6266 pf->stats.rx_fragments + 6267 pf->stats.rx_oversize; 6268 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 6269 /* record drops from the port level */ 6270 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 6271 } 6272 } 6273 6274 /** 6275 * ice_update_pf_stats - Update PF port stats counters 6276 * @pf: PF whose stats needs to be updated 6277 */ 6278 void ice_update_pf_stats(struct ice_pf *pf) 6279 { 6280 struct ice_hw_port_stats *prev_ps, *cur_ps; 6281 struct ice_hw *hw = &pf->hw; 6282 u16 fd_ctr_base; 6283 u8 port; 6284 6285 port = hw->port_info->lport; 6286 prev_ps = &pf->stats_prev; 6287 cur_ps = &pf->stats; 6288 6289 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 6290 &prev_ps->eth.rx_bytes, 6291 &cur_ps->eth.rx_bytes); 6292 6293 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 6294 &prev_ps->eth.rx_unicast, 6295 &cur_ps->eth.rx_unicast); 6296 6297 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 6298 &prev_ps->eth.rx_multicast, 6299 &cur_ps->eth.rx_multicast); 6300 6301 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 6302 &prev_ps->eth.rx_broadcast, 6303 &cur_ps->eth.rx_broadcast); 6304 6305 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 6306 &prev_ps->eth.rx_discards, 6307 &cur_ps->eth.rx_discards); 6308 6309 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 6310 &prev_ps->eth.tx_bytes, 6311 &cur_ps->eth.tx_bytes); 6312 6313 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 6314 &prev_ps->eth.tx_unicast, 6315 &cur_ps->eth.tx_unicast); 6316 6317 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 6318 &prev_ps->eth.tx_multicast, 6319 &cur_ps->eth.tx_multicast); 6320 6321 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 6322 &prev_ps->eth.tx_broadcast, 6323 &cur_ps->eth.tx_broadcast); 6324 6325 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 6326 &prev_ps->tx_dropped_link_down, 6327 &cur_ps->tx_dropped_link_down); 6328 6329 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 6330 &prev_ps->rx_size_64, &cur_ps->rx_size_64); 6331 6332 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 6333 &prev_ps->rx_size_127, &cur_ps->rx_size_127); 6334 6335 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 6336 &prev_ps->rx_size_255, &cur_ps->rx_size_255); 6337 6338 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 6339 &prev_ps->rx_size_511, &cur_ps->rx_size_511); 6340 6341 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 6342 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 6343 6344 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 6345 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 6346 6347 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 6348 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 6349 6350 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 6351 &prev_ps->tx_size_64, &cur_ps->tx_size_64); 6352 6353 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 6354 &prev_ps->tx_size_127, &cur_ps->tx_size_127); 6355 6356 
ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 6357 &prev_ps->tx_size_255, &cur_ps->tx_size_255); 6358 6359 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 6360 &prev_ps->tx_size_511, &cur_ps->tx_size_511); 6361 6362 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 6363 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 6364 6365 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 6366 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 6367 6368 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 6369 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 6370 6371 fd_ctr_base = hw->fd_ctr_base; 6372 6373 ice_stat_update40(hw, 6374 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 6375 pf->stat_prev_loaded, &prev_ps->fd_sb_match, 6376 &cur_ps->fd_sb_match); 6377 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 6378 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 6379 6380 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 6381 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 6382 6383 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 6384 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 6385 6386 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 6387 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 6388 6389 ice_update_dcb_stats(pf); 6390 6391 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 6392 &prev_ps->crc_errors, &cur_ps->crc_errors); 6393 6394 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 6395 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 6396 6397 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 6398 &prev_ps->mac_local_faults, 6399 &cur_ps->mac_local_faults); 6400 6401 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 6402 &prev_ps->mac_remote_faults, 6403 &cur_ps->mac_remote_faults); 6404 6405 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 6406 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 6407 6408 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 6409 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 6410 6411 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 6412 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 6413 6414 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 6415 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 6416 6417 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 6418 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 6419 6420 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; 6421 6422 pf->stat_prev_loaded = true; 6423 } 6424 6425 /** 6426 * ice_get_stats64 - get statistics for network device structure 6427 * @netdev: network interface device structure 6428 * @stats: main device statistics structure 6429 */ 6430 static 6431 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 6432 { 6433 struct ice_netdev_priv *np = netdev_priv(netdev); 6434 struct rtnl_link_stats64 *vsi_stats; 6435 struct ice_vsi *vsi = np->vsi; 6436 6437 vsi_stats = &vsi->net_stats; 6438 6439 if (!vsi->num_txq || !vsi->num_rxq) 6440 return; 6441 6442 /* netdev packet/byte stats come from ring counter. These are obtained 6443 * by summing up ring counters (done by ice_update_vsi_ring_stats). 6444 * But, only call the update routine and read the registers if VSI is 6445 * not down. 
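 *
 * (Added note: while the VSI is down, e.g. across a reset, the cached
 *  snapshot in vsi->net_stats is returned unchanged, so the counters seen
 *  from user space do not go backwards.)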
6446 */
6447 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6448 		ice_update_vsi_ring_stats(vsi);
6449 	stats->tx_packets = vsi_stats->tx_packets;
6450 	stats->tx_bytes = vsi_stats->tx_bytes;
6451 	stats->rx_packets = vsi_stats->rx_packets;
6452 	stats->rx_bytes = vsi_stats->rx_bytes;
6453 
6454 	/* The rest of the stats can be read from the hardware but instead we
6455 	 * just return values that the watchdog task has already obtained from
6456 	 * the hardware.
6457 	 */
6458 	stats->multicast = vsi_stats->multicast;
6459 	stats->tx_errors = vsi_stats->tx_errors;
6460 	stats->tx_dropped = vsi_stats->tx_dropped;
6461 	stats->rx_errors = vsi_stats->rx_errors;
6462 	stats->rx_dropped = vsi_stats->rx_dropped;
6463 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6464 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6465 }
6466 
6467 /**
6468 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6469 * @vsi: VSI having NAPI disabled
6470 */
6471 static void ice_napi_disable_all(struct ice_vsi *vsi)
6472 {
6473 	int q_idx;
6474 
6475 	if (!vsi->netdev)
6476 		return;
6477 
6478 	ice_for_each_q_vector(vsi, q_idx) {
6479 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6480 
6481 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6482 			napi_disable(&q_vector->napi);
6483 
6484 		cancel_work_sync(&q_vector->tx.dim.work);
6485 		cancel_work_sync(&q_vector->rx.dim.work);
6486 	}
6487 }
6488 
6489 /**
6490 * ice_down - Shutdown the connection
6491 * @vsi: The VSI being stopped
6492 *
6493 * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
6494 */
6495 int ice_down(struct ice_vsi *vsi)
6496 {
6497 	int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
6498 
6499 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6500 
6501 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6502 		vlan_err = ice_vsi_del_vlan_zero(vsi);
6503 		if (!ice_is_e810(&vsi->back->hw))
6504 			ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6505 		netif_carrier_off(vsi->netdev);
6506 		netif_tx_disable(vsi->netdev);
6507 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6508 		ice_eswitch_stop_all_tx_queues(vsi->back);
6509 	}
6510 
6511 	ice_vsi_dis_irq(vsi);
6512 
6513 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6514 	if (tx_err)
6515 		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
6516 			   vsi->vsi_num, tx_err);
6517 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6518 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6519 		if (tx_err)
6520 			netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
6521 				   vsi->vsi_num, tx_err);
6522 	}
6523 
6524 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6525 	if (rx_err)
6526 		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
6527 			   vsi->vsi_num, rx_err);
6528 
6529 	ice_napi_disable_all(vsi);
6530 
6531 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6532 		link_err = ice_force_phys_link_state(vsi, false);
6533 		if (link_err)
6534 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6535 				   vsi->vsi_num, link_err);
6536 	}
6537 
6538 	ice_for_each_txq(vsi, i)
6539 		ice_clean_tx_ring(vsi->tx_rings[i]);
6540 
6541 	ice_for_each_rxq(vsi, i)
6542 		ice_clean_rx_ring(vsi->rx_rings[i]);
6543 
6544 	if (tx_err || rx_err || link_err || vlan_err) {
6545 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6546 			   vsi->vsi_num, vsi->vsw->sw_id);
6547 		return -EIO;
6548 	}
6549 
6550 	return 0;
6551 }
6552 
6553 /**
6554 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6555 * @vsi: VSI having resources allocated
6556 *
6557 * Return 0 on
success, negative on failure 6558 */ 6559 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 6560 { 6561 int i, err = 0; 6562 6563 if (!vsi->num_txq) { 6564 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 6565 vsi->vsi_num); 6566 return -EINVAL; 6567 } 6568 6569 ice_for_each_txq(vsi, i) { 6570 struct ice_tx_ring *ring = vsi->tx_rings[i]; 6571 6572 if (!ring) 6573 return -EINVAL; 6574 6575 if (vsi->netdev) 6576 ring->netdev = vsi->netdev; 6577 err = ice_setup_tx_ring(ring); 6578 if (err) 6579 break; 6580 } 6581 6582 return err; 6583 } 6584 6585 /** 6586 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 6587 * @vsi: VSI having resources allocated 6588 * 6589 * Return 0 on success, negative on failure 6590 */ 6591 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 6592 { 6593 int i, err = 0; 6594 6595 if (!vsi->num_rxq) { 6596 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 6597 vsi->vsi_num); 6598 return -EINVAL; 6599 } 6600 6601 ice_for_each_rxq(vsi, i) { 6602 struct ice_rx_ring *ring = vsi->rx_rings[i]; 6603 6604 if (!ring) 6605 return -EINVAL; 6606 6607 if (vsi->netdev) 6608 ring->netdev = vsi->netdev; 6609 err = ice_setup_rx_ring(ring); 6610 if (err) 6611 break; 6612 } 6613 6614 return err; 6615 } 6616 6617 /** 6618 * ice_vsi_open_ctrl - open control VSI for use 6619 * @vsi: the VSI to open 6620 * 6621 * Initialization of the Control VSI 6622 * 6623 * Returns 0 on success, negative value on error 6624 */ 6625 int ice_vsi_open_ctrl(struct ice_vsi *vsi) 6626 { 6627 char int_name[ICE_INT_NAME_STR_LEN]; 6628 struct ice_pf *pf = vsi->back; 6629 struct device *dev; 6630 int err; 6631 6632 dev = ice_pf_to_dev(pf); 6633 /* allocate descriptors */ 6634 err = ice_vsi_setup_tx_rings(vsi); 6635 if (err) 6636 goto err_setup_tx; 6637 6638 err = ice_vsi_setup_rx_rings(vsi); 6639 if (err) 6640 goto err_setup_rx; 6641 6642 err = ice_vsi_cfg(vsi); 6643 if (err) 6644 goto err_setup_rx; 6645 6646 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 6647 dev_driver_string(dev), dev_name(dev)); 6648 err = ice_vsi_req_irq_msix(vsi, int_name); 6649 if (err) 6650 goto err_setup_rx; 6651 6652 ice_vsi_cfg_msix(vsi); 6653 6654 err = ice_vsi_start_all_rx_rings(vsi); 6655 if (err) 6656 goto err_up_complete; 6657 6658 clear_bit(ICE_VSI_DOWN, vsi->state); 6659 ice_vsi_ena_irq(vsi); 6660 6661 return 0; 6662 6663 err_up_complete: 6664 ice_down(vsi); 6665 err_setup_rx: 6666 ice_vsi_free_rx_rings(vsi); 6667 err_setup_tx: 6668 ice_vsi_free_tx_rings(vsi); 6669 6670 return err; 6671 } 6672 6673 /** 6674 * ice_vsi_open - Called when a network interface is made active 6675 * @vsi: the VSI to open 6676 * 6677 * Initialization of the VSI 6678 * 6679 * Returns 0 on success, negative value on error 6680 */ 6681 int ice_vsi_open(struct ice_vsi *vsi) 6682 { 6683 char int_name[ICE_INT_NAME_STR_LEN]; 6684 struct ice_pf *pf = vsi->back; 6685 int err; 6686 6687 /* allocate descriptors */ 6688 err = ice_vsi_setup_tx_rings(vsi); 6689 if (err) 6690 goto err_setup_tx; 6691 6692 err = ice_vsi_setup_rx_rings(vsi); 6693 if (err) 6694 goto err_setup_rx; 6695 6696 err = ice_vsi_cfg(vsi); 6697 if (err) 6698 goto err_setup_rx; 6699 6700 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 6701 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 6702 err = ice_vsi_req_irq_msix(vsi, int_name); 6703 if (err) 6704 goto err_setup_rx; 6705 6706 if (vsi->type == ICE_VSI_PF) { 6707 /* Notify the stack of the actual queue counts. 
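		 * (Added note: without this, the stack could pick a queue
		 * index beyond num_txq/num_rxq and hand work to a ring that
		 * was never allocated.)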
*/ 6708 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 6709 if (err) 6710 goto err_set_qs; 6711 6712 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 6713 if (err) 6714 goto err_set_qs; 6715 } 6716 6717 err = ice_up_complete(vsi); 6718 if (err) 6719 goto err_up_complete; 6720 6721 return 0; 6722 6723 err_up_complete: 6724 ice_down(vsi); 6725 err_set_qs: 6726 ice_vsi_free_irq(vsi); 6727 err_setup_rx: 6728 ice_vsi_free_rx_rings(vsi); 6729 err_setup_tx: 6730 ice_vsi_free_tx_rings(vsi); 6731 6732 return err; 6733 } 6734 6735 /** 6736 * ice_vsi_release_all - Delete all VSIs 6737 * @pf: PF from which all VSIs are being removed 6738 */ 6739 static void ice_vsi_release_all(struct ice_pf *pf) 6740 { 6741 int err, i; 6742 6743 if (!pf->vsi) 6744 return; 6745 6746 ice_for_each_vsi(pf, i) { 6747 if (!pf->vsi[i]) 6748 continue; 6749 6750 if (pf->vsi[i]->type == ICE_VSI_CHNL) 6751 continue; 6752 6753 err = ice_vsi_release(pf->vsi[i]); 6754 if (err) 6755 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 6756 i, err, pf->vsi[i]->vsi_num); 6757 } 6758 } 6759 6760 /** 6761 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 6762 * @pf: pointer to the PF instance 6763 * @type: VSI type to rebuild 6764 * 6765 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 6766 */ 6767 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 6768 { 6769 struct device *dev = ice_pf_to_dev(pf); 6770 int i, err; 6771 6772 ice_for_each_vsi(pf, i) { 6773 struct ice_vsi *vsi = pf->vsi[i]; 6774 6775 if (!vsi || vsi->type != type) 6776 continue; 6777 6778 /* rebuild the VSI */ 6779 err = ice_vsi_rebuild(vsi, true); 6780 if (err) { 6781 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", 6782 err, vsi->idx, ice_vsi_type_str(type)); 6783 return err; 6784 } 6785 6786 /* replay filters for the VSI */ 6787 err = ice_replay_vsi(&pf->hw, vsi->idx); 6788 if (err) { 6789 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", 6790 err, vsi->idx, ice_vsi_type_str(type)); 6791 return err; 6792 } 6793 6794 /* Re-map HW VSI number, using VSI handle that has been 6795 * previously validated in ice_replay_vsi() call above 6796 */ 6797 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 6798 6799 /* enable the VSI */ 6800 err = ice_ena_vsi(vsi, false); 6801 if (err) { 6802 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", 6803 err, vsi->idx, ice_vsi_type_str(type)); 6804 return err; 6805 } 6806 6807 dev_info(dev, "VSI rebuilt. 
VSI index %d, type %s\n", vsi->idx, 6808 ice_vsi_type_str(type)); 6809 } 6810 6811 return 0; 6812 } 6813 6814 /** 6815 * ice_update_pf_netdev_link - Update PF netdev link status 6816 * @pf: pointer to the PF instance 6817 */ 6818 static void ice_update_pf_netdev_link(struct ice_pf *pf) 6819 { 6820 bool link_up; 6821 int i; 6822 6823 ice_for_each_vsi(pf, i) { 6824 struct ice_vsi *vsi = pf->vsi[i]; 6825 6826 if (!vsi || vsi->type != ICE_VSI_PF) 6827 return; 6828 6829 ice_get_link_status(pf->vsi[i]->port_info, &link_up); 6830 if (link_up) { 6831 netif_carrier_on(pf->vsi[i]->netdev); 6832 netif_tx_wake_all_queues(pf->vsi[i]->netdev); 6833 } else { 6834 netif_carrier_off(pf->vsi[i]->netdev); 6835 netif_tx_stop_all_queues(pf->vsi[i]->netdev); 6836 } 6837 } 6838 } 6839 6840 /** 6841 * ice_rebuild - rebuild after reset 6842 * @pf: PF to rebuild 6843 * @reset_type: type of reset 6844 * 6845 * Do not rebuild VF VSI in this flow because that is already handled via 6846 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a 6847 * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want 6848 * to reset/rebuild all the VF VSI twice. 6849 */ 6850 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 6851 { 6852 struct device *dev = ice_pf_to_dev(pf); 6853 struct ice_hw *hw = &pf->hw; 6854 bool dvm; 6855 int err; 6856 6857 if (test_bit(ICE_DOWN, pf->state)) 6858 goto clear_recovery; 6859 6860 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); 6861 6862 if (reset_type == ICE_RESET_EMPR) { 6863 /* If an EMP reset has occurred, any previously pending flash 6864 * update will have completed. We no longer know whether or 6865 * not the NVM update EMP reset is restricted. 6866 */ 6867 pf->fw_emp_reset_disabled = false; 6868 } 6869 6870 err = ice_init_all_ctrlq(hw); 6871 if (err) { 6872 dev_err(dev, "control queues init failed %d\n", err); 6873 goto err_init_ctrlq; 6874 } 6875 6876 /* if DDP was previously loaded successfully */ 6877 if (!ice_is_safe_mode(pf)) { 6878 /* reload the SW DB of filter tables */ 6879 if (reset_type == ICE_RESET_PFR) 6880 ice_fill_blk_tbls(hw); 6881 else 6882 /* Reload DDP Package after CORER/GLOBR reset */ 6883 ice_load_pkg(NULL, pf); 6884 } 6885 6886 err = ice_clear_pf_cfg(hw); 6887 if (err) { 6888 dev_err(dev, "clear PF configuration failed %d\n", err); 6889 goto err_init_ctrlq; 6890 } 6891 6892 if (pf->first_sw->dflt_vsi_ena) 6893 dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); 6894 /* clear the default VSI configuration if it exists */ 6895 pf->first_sw->dflt_vsi = NULL; 6896 pf->first_sw->dflt_vsi_ena = false; 6897 6898 ice_clear_pxe_mode(hw); 6899 6900 err = ice_init_nvm(hw); 6901 if (err) { 6902 dev_err(dev, "ice_init_nvm failed %d\n", err); 6903 goto err_init_ctrlq; 6904 } 6905 6906 err = ice_get_caps(hw); 6907 if (err) { 6908 dev_err(dev, "ice_get_caps failed %d\n", err); 6909 goto err_init_ctrlq; 6910 } 6911 6912 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 6913 if (err) { 6914 dev_err(dev, "set_mac_cfg failed %d\n", err); 6915 goto err_init_ctrlq; 6916 } 6917 6918 dvm = ice_is_dvm_ena(hw); 6919 6920 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 6921 if (err) 6922 goto err_init_ctrlq; 6923 6924 err = ice_sched_init_port(hw->port_info); 6925 if (err) 6926 goto err_sched_init_port; 6927 6928 /* start misc vector */ 6929 err = ice_req_irq_msix_misc(pf); 6930 if (err) { 6931 dev_err(dev, "misc vector setup failed: %d\n", err); 6932 goto 
err_sched_init_port; 6933 } 6934 6935 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 6936 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 6937 if (!rd32(hw, PFQF_FD_SIZE)) { 6938 u16 unused, guar, b_effort; 6939 6940 guar = hw->func_caps.fd_fltr_guar; 6941 b_effort = hw->func_caps.fd_fltr_best_effort; 6942 6943 /* force guaranteed filter pool for PF */ 6944 ice_alloc_fd_guar_item(hw, &unused, guar); 6945 /* force shared filter pool for PF */ 6946 ice_alloc_fd_shrd_item(hw, &unused, b_effort); 6947 } 6948 } 6949 6950 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 6951 ice_dcb_rebuild(pf); 6952 6953 /* If the PF previously had enabled PTP, PTP init needs to happen before 6954 * the VSI rebuild. If not, this causes the PTP link status events to 6955 * fail. 6956 */ 6957 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 6958 ice_ptp_reset(pf); 6959 6960 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 6961 ice_gnss_init(pf); 6962 6963 /* rebuild PF VSI */ 6964 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 6965 if (err) { 6966 dev_err(dev, "PF VSI rebuild failed: %d\n", err); 6967 goto err_vsi_rebuild; 6968 } 6969 6970 /* configure PTP timestamping after VSI rebuild */ 6971 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 6972 ice_ptp_cfg_timestamp(pf, false); 6973 6974 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); 6975 if (err) { 6976 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); 6977 goto err_vsi_rebuild; 6978 } 6979 6980 if (reset_type == ICE_RESET_PFR) { 6981 err = ice_rebuild_channels(pf); 6982 if (err) { 6983 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", 6984 err); 6985 goto err_vsi_rebuild; 6986 } 6987 } 6988 6989 /* If Flow Director is active */ 6990 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 6991 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 6992 if (err) { 6993 dev_err(dev, "control VSI rebuild failed: %d\n", err); 6994 goto err_vsi_rebuild; 6995 } 6996 6997 /* replay HW Flow Director recipes */ 6998 if (hw->fdir_prof) 6999 ice_fdir_replay_flows(hw); 7000 7001 /* replay Flow Director filters */ 7002 ice_fdir_replay_fltrs(pf); 7003 7004 ice_rebuild_arfs(pf); 7005 } 7006 7007 ice_update_pf_netdev_link(pf); 7008 7009 /* tell the firmware we are up */ 7010 err = ice_send_version(pf); 7011 if (err) { 7012 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", 7013 err); 7014 goto err_vsi_rebuild; 7015 } 7016 7017 ice_replay_post(hw); 7018 7019 /* if we get here, reset flow is successful */ 7020 clear_bit(ICE_RESET_FAILED, pf->state); 7021 7022 ice_plug_aux_dev(pf); 7023 return; 7024 7025 err_vsi_rebuild: 7026 err_sched_init_port: 7027 ice_sched_cleanup_all(hw); 7028 err_init_ctrlq: 7029 ice_shutdown_all_ctrlq(hw); 7030 set_bit(ICE_RESET_FAILED, pf->state); 7031 clear_recovery: 7032 /* set this bit in PF state to control service task scheduling */ 7033 set_bit(ICE_NEEDS_RESTART, pf->state); 7034 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 7035 } 7036 7037 /** 7038 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP 7039 * @vsi: Pointer to VSI structure 7040 */ 7041 static int ice_max_xdp_frame_size(struct ice_vsi *vsi) 7042 { 7043 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) 7044 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; 7045 else 7046 return ICE_RXBUF_3072; 7047 } 7048 7049 /** 7050 * ice_change_mtu - NDO callback to change the MTU 7051 * @netdev: network interface device structure 7052 * @new_mtu: new value for maximum frame size 7053 * 7054 * Returns 0 on success, 
negative on failure 7055 */ 7056 static int ice_change_mtu(struct net_device *netdev, int new_mtu) 7057 { 7058 struct ice_netdev_priv *np = netdev_priv(netdev); 7059 struct ice_vsi *vsi = np->vsi; 7060 struct ice_pf *pf = vsi->back; 7061 u8 count = 0; 7062 int err = 0; 7063 7064 if (new_mtu == (int)netdev->mtu) { 7065 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 7066 return 0; 7067 } 7068 7069 if (ice_is_xdp_ena_vsi(vsi)) { 7070 int frame_size = ice_max_xdp_frame_size(vsi); 7071 7072 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 7073 netdev_err(netdev, "max MTU for XDP usage is %d\n", 7074 frame_size - ICE_ETH_PKT_HDR_PAD); 7075 return -EINVAL; 7076 } 7077 } 7078 7079 /* if a reset is in progress, wait for some time for it to complete */ 7080 do { 7081 if (ice_is_reset_in_progress(pf->state)) { 7082 count++; 7083 usleep_range(1000, 2000); 7084 } else { 7085 break; 7086 } 7087 7088 } while (count < 100); 7089 7090 if (count == 100) { 7091 netdev_err(netdev, "can't change MTU. Device is busy\n"); 7092 return -EBUSY; 7093 } 7094 7095 netdev->mtu = (unsigned int)new_mtu; 7096 7097 /* if VSI is up, bring it down and then back up */ 7098 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 7099 err = ice_down(vsi); 7100 if (err) { 7101 netdev_err(netdev, "change MTU if_down err %d\n", err); 7102 return err; 7103 } 7104 7105 err = ice_up(vsi); 7106 if (err) { 7107 netdev_err(netdev, "change MTU if_up err %d\n", err); 7108 return err; 7109 } 7110 } 7111 7112 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); 7113 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); 7114 7115 return err; 7116 } 7117 7118 /** 7119 * ice_eth_ioctl - Access the hwtstamp interface 7120 * @netdev: network interface device structure 7121 * @ifr: interface request data 7122 * @cmd: ioctl command 7123 */ 7124 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 7125 { 7126 struct ice_netdev_priv *np = netdev_priv(netdev); 7127 struct ice_pf *pf = np->vsi->back; 7128 7129 switch (cmd) { 7130 case SIOCGHWTSTAMP: 7131 return ice_ptp_get_ts_config(pf, ifr); 7132 case SIOCSHWTSTAMP: 7133 return ice_ptp_set_ts_config(pf, ifr); 7134 default: 7135 return -EOPNOTSUPP; 7136 } 7137 } 7138 7139 /** 7140 * ice_aq_str - convert AQ err code to a string 7141 * @aq_err: the AQ error code to convert 7142 */ 7143 const char *ice_aq_str(enum ice_aq_err aq_err) 7144 { 7145 switch (aq_err) { 7146 case ICE_AQ_RC_OK: 7147 return "OK"; 7148 case ICE_AQ_RC_EPERM: 7149 return "ICE_AQ_RC_EPERM"; 7150 case ICE_AQ_RC_ENOENT: 7151 return "ICE_AQ_RC_ENOENT"; 7152 case ICE_AQ_RC_ENOMEM: 7153 return "ICE_AQ_RC_ENOMEM"; 7154 case ICE_AQ_RC_EBUSY: 7155 return "ICE_AQ_RC_EBUSY"; 7156 case ICE_AQ_RC_EEXIST: 7157 return "ICE_AQ_RC_EEXIST"; 7158 case ICE_AQ_RC_EINVAL: 7159 return "ICE_AQ_RC_EINVAL"; 7160 case ICE_AQ_RC_ENOSPC: 7161 return "ICE_AQ_RC_ENOSPC"; 7162 case ICE_AQ_RC_ENOSYS: 7163 return "ICE_AQ_RC_ENOSYS"; 7164 case ICE_AQ_RC_EMODE: 7165 return "ICE_AQ_RC_EMODE"; 7166 case ICE_AQ_RC_ENOSEC: 7167 return "ICE_AQ_RC_ENOSEC"; 7168 case ICE_AQ_RC_EBADSIG: 7169 return "ICE_AQ_RC_EBADSIG"; 7170 case ICE_AQ_RC_ESVN: 7171 return "ICE_AQ_RC_ESVN"; 7172 case ICE_AQ_RC_EBADMAN: 7173 return "ICE_AQ_RC_EBADMAN"; 7174 case ICE_AQ_RC_EBADBUF: 7175 return "ICE_AQ_RC_EBADBUF"; 7176 } 7177 7178 return "ICE_AQ_RC_UNKNOWN"; 7179 } 7180 7181 /** 7182 * ice_set_rss_lut - Set RSS LUT 7183 * @vsi: Pointer to VSI structure 7184 * @lut: Lookup table 7185 * @lut_size: Lookup table size 7186 * 7187 * Returns 0 on success, negative on failure 7188 */ 
7189 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7190 {
7191 	struct ice_aq_get_set_rss_lut_params params = {};
7192 	struct ice_hw *hw = &vsi->back->hw;
7193 	int status;
7194 
7195 	if (!lut)
7196 		return -EINVAL;
7197 
7198 	params.vsi_handle = vsi->idx;
7199 	params.lut_size = lut_size;
7200 	params.lut_type = vsi->rss_lut_type;
7201 	params.lut = lut;
7202 
7203 	status = ice_aq_set_rss_lut(hw, &params);
7204 	if (status)
7205 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7206 			status, ice_aq_str(hw->adminq.sq_last_status));
7207 
7208 	return status;
7209 }
7210 
7211 /**
7212 * ice_set_rss_key - Set RSS key
7213 * @vsi: Pointer to the VSI structure
7214 * @seed: RSS hash seed
7215 *
7216 * Returns 0 on success, negative on failure
7217 */
7218 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7219 {
7220 	struct ice_hw *hw = &vsi->back->hw;
7221 	int status;
7222 
7223 	if (!seed)
7224 		return -EINVAL;
7225 
7226 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7227 	if (status)
7228 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7229 			status, ice_aq_str(hw->adminq.sq_last_status));
7230 
7231 	return status;
7232 }
7233 
7234 /**
7235 * ice_get_rss_lut - Get RSS LUT
7236 * @vsi: Pointer to VSI structure
7237 * @lut: Buffer to store the lookup table entries
7238 * @lut_size: Size of buffer to store the lookup table entries
7239 *
7240 * Returns 0 on success, negative on failure
7241 */
7242 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7243 {
7244 	struct ice_aq_get_set_rss_lut_params params = {};
7245 	struct ice_hw *hw = &vsi->back->hw;
7246 	int status;
7247 
7248 	if (!lut)
7249 		return -EINVAL;
7250 
7251 	params.vsi_handle = vsi->idx;
7252 	params.lut_size = lut_size;
7253 	params.lut_type = vsi->rss_lut_type;
7254 	params.lut = lut;
7255 
7256 	status = ice_aq_get_rss_lut(hw, &params);
7257 	if (status)
7258 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7259 			status, ice_aq_str(hw->adminq.sq_last_status));
7260 
7261 	return status;
7262 }
7263 
7264 /**
7265 * ice_get_rss_key - Get RSS key
7266 * @vsi: Pointer to VSI structure
7267 * @seed: Buffer to store the key in
7268 *
7269 * Returns 0 on success, negative on failure
7270 */
7271 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7272 {
7273 	struct ice_hw *hw = &vsi->back->hw;
7274 	int status;
7275 
7276 	if (!seed)
7277 		return -EINVAL;
7278 
7279 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7280 	if (status)
7281 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7282 			status, ice_aq_str(hw->adminq.sq_last_status));
7283 
7284 	return status;
7285 }
7286 
7287 /**
7288 * ice_bridge_getlink - Get the hardware bridge mode
7289 * @skb: skb buff
7290 * @pid: process ID
7291 * @seq: RTNL message seq
7292 * @dev: the netdev being configured
7293 * @filter_mask: filter mask passed in
7294 * @nlflags: netlink flags passed in
7295 *
7296 * Return the bridge mode (VEB/VEPA)
7297 */
7298 static int
7299 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7300 		   struct net_device *dev, u32 filter_mask, int nlflags)
7301 {
7302 	struct ice_netdev_priv *np = netdev_priv(dev);
7303 	struct ice_vsi *vsi = np->vsi;
7304 	struct ice_pf *pf = vsi->back;
7305 	u16 bmode;
7306 
7307 	bmode = pf->first_sw->bridge_mode;
7308 
7309 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7310 				       filter_mask, NULL);
7311 }
7312 
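/* Illustrative sketch (an addition, not part of the upstream driver):
 * programming an even RSS spread with the setter above. It assumes
 * vsi->rss_table_size and vsi->rss_size have been populated by VSI init,
 * which is how the rest of the driver uses those fields.
 */
static int __maybe_unused ice_example_fill_rss_lut(struct ice_vsi *vsi)
{
	u8 *lut;
	u16 i;
	int err;

	lut = kcalloc(vsi->rss_table_size, sizeof(*lut), GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* spread the VSI's Rx queues round-robin across the whole table */
	for (i = 0; i < vsi->rss_table_size; i++)
		lut[i] = i % vsi->rss_size;

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
	return err;
}
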
7313 /**
7314 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7315 * @vsi: Pointer to VSI structure
7316 * @bmode: Hardware bridge mode (VEB/VEPA)
7317 *
7318 * Returns 0 on success, negative on failure
7319 */
7320 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7321 {
7322 	struct ice_aqc_vsi_props *vsi_props;
7323 	struct ice_hw *hw = &vsi->back->hw;
7324 	struct ice_vsi_ctx *ctxt;
7325 	int ret;
7326 
7327 	vsi_props = &vsi->info;
7328 
7329 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7330 	if (!ctxt)
7331 		return -ENOMEM;
7332 
7333 	ctxt->info = vsi->info;
7334 
7335 	if (bmode == BRIDGE_MODE_VEB)
7336 		/* change from VEPA to VEB mode */
7337 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7338 	else
7339 		/* change from VEB to VEPA mode */
7340 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7341 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7342 
7343 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7344 	if (ret) {
7345 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7346 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7347 		goto out;
7348 	}
7349 	/* Update sw flags for bookkeeping */
7350 	vsi_props->sw_flags = ctxt->info.sw_flags;
7351 
7352 out:
7353 	kfree(ctxt);
7354 	return ret;
7355 }
7356 
7357 /**
7358 * ice_bridge_setlink - Set the hardware bridge mode
7359 * @dev: the netdev being configured
7360 * @nlh: RTNL message
7361 * @flags: bridge setlink flags
7362 * @extack: netlink extended ack
7363 *
7364 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7365 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
7366 * not already set) for all VSIs connected to this switch. Also updates the
7367 * unicast switch filter rules for the corresponding switch of the netdev.
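 *
 * Illustrative usage from user space (an addition, not upstream text):
 *	bridge link set dev <pf-netdev> hwmode veb
 * selects VEB (local switching between functions on the same port), while
 * "hwmode vepa" sends all traffic to the external switch.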
7368 */
7369 static int
7370 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7371 		   u16 __always_unused flags,
7372 		   struct netlink_ext_ack __always_unused *extack)
7373 {
7374 	struct ice_netdev_priv *np = netdev_priv(dev);
7375 	struct ice_pf *pf = np->vsi->back;
7376 	struct nlattr *attr, *br_spec;
7377 	struct ice_hw *hw = &pf->hw;
7378 	struct ice_sw *pf_sw;
7379 	int rem, v, err = 0;
7380 
7381 	pf_sw = pf->first_sw;
7382 	/* find the attribute in the netlink message */
7383 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7384 
7385 	nla_for_each_nested(attr, br_spec, rem) {
7386 		__u16 mode;
7387 
7388 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7389 			continue;
7390 		mode = nla_get_u16(attr);
7391 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7392 			return -EINVAL;
7393 		/* Continue if bridge mode is not being flipped */
7394 		if (mode == pf_sw->bridge_mode)
7395 			continue;
7396 		/* Iterate through the PF VSI list and update the loopback
7397 		 * mode of each VSI
7398 		 */
7399 		ice_for_each_vsi(pf, v) {
7400 			if (!pf->vsi[v])
7401 				continue;
7402 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7403 			if (err)
7404 				return err;
7405 		}
7406 
7407 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7408 		/* Update the unicast switch filter rules for the corresponding
7409 		 * switch of the netdev
7410 		 */
7411 		err = ice_update_sw_rule_bridge_mode(hw);
7412 		if (err) {
7413 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7414 				   mode, err,
7415 				   ice_aq_str(hw->adminq.sq_last_status));
7416 			/* revert hw->evb_veb */
7417 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7418 			return err;
7419 		}
7420 
7421 		pf_sw->bridge_mode = mode;
7422 	}
7423 
7424 	return 0;
7425 }
7426 
7427 /**
7428 * ice_tx_timeout - Respond to a Tx Hang
7429 * @netdev: network interface device structure
7430 * @txqueue: Tx queue
7431 */
7432 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7433 {
7434 	struct ice_netdev_priv *np = netdev_priv(netdev);
7435 	struct ice_tx_ring *tx_ring = NULL;
7436 	struct ice_vsi *vsi = np->vsi;
7437 	struct ice_pf *pf = vsi->back;
7438 	u32 i;
7439 
7440 	pf->tx_timeout_count++;
7441 
7442 	/* Check if PFC is enabled for the TC to which the queue belongs.
7443 	 * If so, the Tx timeout is not caused by a hung queue and there is
7444 	 * no need to reset and rebuild
7445 	 */
7446 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7447 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7448 			 txqueue);
7449 		return;
7450 	}
7451 
7452 	/* now that we have an index, find the tx_ring struct */
7453 	ice_for_each_txq(vsi, i)
7454 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7455 			if (txqueue == vsi->tx_rings[i]->q_index) {
7456 				tx_ring = vsi->tx_rings[i];
7457 				break;
7458 			}
7459 
7460 	/* Reset recovery level if enough time has elapsed after last timeout.
7461 	 * Also ensure no new reset action happens before next timeout period.
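	 *
	 * (Added note: the escalation below is PFR for level 1, CORER for
	 * level 2, GLOBR for level 3; pf->tx_timeout_recovery_level is bumped
	 * after each attempt and reset to 1 once 20 seconds pass without
	 * another timeout.)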
7462 */ 7463 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 7464 pf->tx_timeout_recovery_level = 1; 7465 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 7466 netdev->watchdog_timeo))) 7467 return; 7468 7469 if (tx_ring) { 7470 struct ice_hw *hw = &pf->hw; 7471 u32 head, val = 0; 7472 7473 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & 7474 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; 7475 /* Read interrupt register */ 7476 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); 7477 7478 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 7479 vsi->vsi_num, txqueue, tx_ring->next_to_clean, 7480 head, tx_ring->next_to_use, val); 7481 } 7482 7483 pf->tx_timeout_last_recovery = jiffies; 7484 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", 7485 pf->tx_timeout_recovery_level, txqueue); 7486 7487 switch (pf->tx_timeout_recovery_level) { 7488 case 1: 7489 set_bit(ICE_PFR_REQ, pf->state); 7490 break; 7491 case 2: 7492 set_bit(ICE_CORER_REQ, pf->state); 7493 break; 7494 case 3: 7495 set_bit(ICE_GLOBR_REQ, pf->state); 7496 break; 7497 default: 7498 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 7499 set_bit(ICE_DOWN, pf->state); 7500 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 7501 set_bit(ICE_SERVICE_DIS, pf->state); 7502 break; 7503 } 7504 7505 ice_service_task_schedule(pf); 7506 pf->tx_timeout_recovery_level++; 7507 } 7508 7509 /** 7510 * ice_setup_tc_cls_flower - flower classifier offloads 7511 * @np: net device to configure 7512 * @filter_dev: device on which filter is added 7513 * @cls_flower: offload data 7514 */ 7515 static int 7516 ice_setup_tc_cls_flower(struct ice_netdev_priv *np, 7517 struct net_device *filter_dev, 7518 struct flow_cls_offload *cls_flower) 7519 { 7520 struct ice_vsi *vsi = np->vsi; 7521 7522 if (cls_flower->common.chain_index) 7523 return -EOPNOTSUPP; 7524 7525 switch (cls_flower->command) { 7526 case FLOW_CLS_REPLACE: 7527 return ice_add_cls_flower(filter_dev, vsi, cls_flower); 7528 case FLOW_CLS_DESTROY: 7529 return ice_del_cls_flower(vsi, cls_flower); 7530 default: 7531 return -EINVAL; 7532 } 7533 } 7534 7535 /** 7536 * ice_setup_tc_block_cb - callback handler registered for TC block 7537 * @type: TC SETUP type 7538 * @type_data: TC flower offload data that contains user input 7539 * @cb_priv: netdev private data 7540 */ 7541 static int 7542 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) 7543 { 7544 struct ice_netdev_priv *np = cb_priv; 7545 7546 switch (type) { 7547 case TC_SETUP_CLSFLOWER: 7548 return ice_setup_tc_cls_flower(np, np->vsi->netdev, 7549 type_data); 7550 default: 7551 return -EOPNOTSUPP; 7552 } 7553 } 7554 7555 /** 7556 * ice_validate_mqprio_qopt - Validate TCF input parameters 7557 * @vsi: Pointer to VSI 7558 * @mqprio_qopt: input parameters for mqprio queue configuration 7559 * 7560 * This function validates MQPRIO params, such as qcount (power of 2 wherever 7561 * needed), and make sure user doesn't specify qcount and BW rate limit 7562 * for TCs, which are more than "num_tc" 7563 */ 7564 static int 7565 ice_validate_mqprio_qopt(struct ice_vsi *vsi, 7566 struct tc_mqprio_qopt_offload *mqprio_qopt) 7567 { 7568 u64 sum_max_rate = 0, sum_min_rate = 0; 7569 int non_power_of_2_qcount = 0; 7570 struct ice_pf *pf = vsi->back; 7571 int max_rss_q_cnt = 0; 7572 struct device *dev; 7573 int i, speed; 7574 u8 num_tc; 7575 7576 if (vsi->type != ICE_VSI_PF) 7577 return 
/**
 * ice_setup_tc_cls_flower - flower classifier offloads
 * @np: net device to configure
 * @filter_dev: device on which filter is added
 * @cls_flower: offload data
 */
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
			struct net_device *filter_dev,
			struct flow_cls_offload *cls_flower)
{
	struct ice_vsi *vsi = np->vsi;

	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(vsi, cls_flower);
	default:
		return -EINVAL;
	}
}

/**
 * ice_setup_tc_block_cb - callback handler registered for TC block
 * @type: TC SETUP type
 * @type_data: TC flower offload data that contains user input
 * @cb_priv: netdev private data
 */
static int
ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct ice_netdev_priv *np = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}
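/* Illustrative user-space trigger for the cls_flower path above (device
 * name and addresses are placeholders):
 *
 *   tc filter add dev <pf-netdev> protocol ip ingress \
 *      flower dst_ip 192.168.1.10/32 ip_proto tcp dst_port 80 \
 *      skip_sw hw_tc 1
 *
 * This arrives as FLOW_CLS_REPLACE and is handed to ice_add_cls_flower();
 * deleting the filter arrives as FLOW_CLS_DESTROY.
 */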
/**
 * ice_validate_mqprio_qopt - Validate TCF input parameters
 * @vsi: Pointer to VSI
 * @mqprio_qopt: input parameters for mqprio queue configuration
 *
 * This function validates MQPRIO params, such as qcount (power of 2 wherever
 * needed), and makes sure the user doesn't specify qcount and BW rate limit
 * for TCs beyond "num_tc"
 */
static int
ice_validate_mqprio_qopt(struct ice_vsi *vsi,
			 struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0, sum_min_rate = 0;
	int non_power_of_2_qcount = 0;
	struct ice_pf *pf = vsi->back;
	int max_rss_q_cnt = 0;
	struct device *dev;
	int i, speed;
	u8 num_tc;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	vsi->ch_rss_size = 0;
	num_tc = mqprio_qopt->qopt.num_tc;

	for (i = 0; num_tc; i++) {
		int qcount = mqprio_qopt->qopt.count[i];
		u64 max_rate, min_rate, rem;

		if (!qcount)
			return -EINVAL;

		if (is_power_of_2(qcount)) {
			if (non_power_of_2_qcount &&
			    qcount > non_power_of_2_qcount) {
				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount > max_rss_q_cnt)
				max_rss_q_cnt = qcount;
		} else {
			if (non_power_of_2_qcount &&
			    qcount != non_power_of_2_qcount) {
				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount < max_rss_q_cnt) {
				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
					qcount, max_rss_q_cnt);
				return -EINVAL;
			}
			max_rss_q_cnt = qcount;
			non_power_of_2_qcount = qcount;
		}

		/* TC command takes input in K/M/Gbps or K/M/Gbit etc but
		 * converts the bandwidth rate limit into Bytes/s when
		 * passing it down to the driver. So convert input bandwidth
		 * from Bytes/s to Kbps
		 */
		max_rate = mqprio_qopt->max_rate[i];
		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
		sum_max_rate += max_rate;

		/* min_rate is minimum guaranteed rate and it can't be zero */
		min_rate = mqprio_qopt->min_rate[i];
		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
		sum_min_rate += min_rate;

		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
				min_rate, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps\n",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps\n",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		/* min_rate can't be more than max_rate, except when max_rate
		 * is zero (which implies the max line rate is sought). In
		 * such a case min_rate can be more than max.
		 */
		if (max_rate && min_rate > max_rate) {
			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
				min_rate, max_rate);
			return -EINVAL;
		}

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + qcount))
			return -EINVAL;
	}
	if (vsi->num_rxq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;
	if (vsi->num_txq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;

	speed = ice_get_link_speed_kbps(vsi);
	if (sum_max_rate && sum_max_rate > (u64)speed) {
		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
			sum_max_rate, speed);
		return -EINVAL;
	}
	if (sum_min_rate && sum_min_rate > (u64)speed) {
		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
			sum_min_rate, speed);
		return -EINVAL;
	}

	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
	vsi->ch_rss_size = max_rss_q_cnt;

	return 0;
}
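/* Example of a configuration that passes the validation above (illustrative
 * values; "<pf-netdev>" is a placeholder):
 *
 *   tc qdisc add dev <pf-netdev> root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *      shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 4Gbit 5Gbit
 *
 * Both qcounts (4 and 4) are powers of 2 and the queue regions are
 * contiguous (offset[1] == offset[0] + count[0]), so the checks above pass.
 */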
/**
 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
 * @pf: ptr to PF device
 * @vsi: ptr to VSI
 */
static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(pf);
	bool added = false;
	struct ice_hw *hw;
	int flow;

	if (!(vsi->num_gfltr || vsi->num_bfltr))
		return -EINVAL;

	hw = &pf->hw;
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof;
		int tun, status;
		u64 entry_h;

		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
		      hw->fdir_prof[flow]->cnt))
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			enum ice_flow_priority prio;
			u64 prof_id;

			/* add this VSI to FDir profile for this flow */
			prio = ICE_FLOW_PRIO_NORMAL;
			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
						    prof->vsi_h[0], vsi->idx,
						    prio, prof->fdir_seg[tun],
						    &entry_h);
			if (status) {
				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
					vsi->idx, flow);
				continue;
			}

			prof->entry_h[prof->cnt][tun] = entry_h;
		}

		/* store VSI for filter replay and delete */
		prof->vsi_h[prof->cnt] = vsi->idx;
		prof->cnt++;

		added = true;
		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
			flow);
	}

	if (!added)
		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);

	return 0;
}
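/* Note on the prof_id arithmetic above: profile IDs are laid out as one
 * block of ICE_FLTR_PTYPE_MAX IDs per tunnel segment, so flow "f" in
 * segment "tun" maps to f + tun * ICE_FLTR_PTYPE_MAX. For example, with
 * two HW segments (non-tunneled and tunneled), flow 5 uses profile 5 for
 * the plain segment and 5 + ICE_FLTR_PTYPE_MAX for the tunneled one.
 */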
/**
 * ice_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @sw_id: underlying HW switching element ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 */
static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi;

	if (ch->type != ICE_VSI_CHNL) {
		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
	if (!vsi || vsi->type != ICE_VSI_CHNL) {
		dev_err(dev, "create chnl VSI failure\n");
		return -EINVAL;
	}

	ice_add_vsi_to_fdir(pf, vsi);

	ch->sw_id = sw_id;
	ch->vsi_num = vsi->vsi_num;
	ch->info.mapping_flags = vsi->info.mapping_flags;
	ch->ch_vsi = vsi;
	/* set the back pointer of channel for newly created VSI */
	vsi->ch = ch;

	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));

	return 0;
}

/**
 * ice_chnl_cfg_res - configure channel specific resources
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure channel specific resources such as rings and vectors.
 */
static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	int i;

	for (i = 0; i < ch->num_txq; i++) {
		struct ice_q_vector *tx_q_vector, *rx_q_vector;
		struct ice_ring_container *rc;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;

		tx_ring = vsi->tx_rings[ch->base_q + i];
		rx_ring = vsi->rx_rings[ch->base_q + i];
		if (!tx_ring || !rx_ring)
			continue;

		/* setup ring being channel enabled */
		tx_ring->ch = ch;
		rx_ring->ch = ch;

		/* following code block sets up vector specific attributes */
		tx_q_vector = tx_ring->q_vector;
		rx_q_vector = rx_ring->q_vector;
		if (!tx_q_vector && !rx_q_vector)
			continue;

		if (tx_q_vector) {
			tx_q_vector->ch = ch;
			/* setup Tx ITR setting if DIM is off */
			rc = &tx_q_vector->tx;
			if (!ITR_IS_DYNAMIC(rc))
				ice_write_itr(rc, rc->itr_setting);
		}
		if (rx_q_vector) {
			rx_q_vector->ch = ch;
			/* setup Rx ITR setting if DIM is off */
			rc = &rx_q_vector->rx;
			if (!ITR_IS_DYNAMIC(rc))
				ice_write_itr(rc, rc->itr_setting);
		}
	}

	/* it is safe to assume that, if channel has non-zero num_t[r]xq, then
	 * GLINT_ITR register would have been written to perform in-context
	 * update, hence perform flush
	 */
	if (ch->num_txq || ch->num_rxq)
		ice_flush(&vsi->back->hw);
}

/**
 * ice_cfg_chnl_all_res - configure channel resources
 * @vsi: ptr to main_vsi
 * @ch: ptr to channel structure
 *
 * This function configures channel specific resources such as flow-director
 * counter index, and other resources such as queues, vectors, ITR settings
 */
static void
ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	/* configure channel (aka ADQ) resources such as queues, vectors,
	 * ITR settings for channel specific vectors and anything else
	 */
	ice_chnl_cfg_res(vsi, ch);
}

/**
 * ice_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @sw_id: underlying HW switching element ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures Tx rings accordingly
 */
static int
ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_channel *ch, u16 sw_id, u8 type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret;

	ch->base_q = vsi->next_base_q;
	ch->type = type;

	ret = ice_add_channel(pf, sw_id, ch);
	if (ret) {
		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
		return ret;
	}

	/* configure/setup ADQ specific resources */
	ice_cfg_chnl_all_res(vsi, ch);

	/* make sure to update the next_base_q so that subsequent channel's
	 * (aka ADQ) VSI queue map is correct
	 */
	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
		ch->num_rxq);

	return 0;
}

/**
 * ice_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element
 */
static bool
ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		  struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	u16 sw_id;
	int ret;

	if (vsi->type != ICE_VSI_PF) {
		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
		return false;
	}

	sw_id = pf->first_sw->sw_id;

	/* create channel (VSI) */
	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
	if (ret) {
		dev_err(dev, "failed to setup hw_channel\n");
		return false;
	}
	dev_dbg(dev, "successfully created channel\n");

	return ch->ch_vsi ? true : false;
}

/**
 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
 */
static int
ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
{
	int err;

	err = ice_set_min_bw_limit(vsi, min_tx_rate);
	if (err)
		return err;

	return ice_set_max_bw_limit(vsi, max_tx_rate);
}
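/* Worked conversion example for the Kbps rates used above (assuming
 * ICE_BW_KBPS_DIVISOR == 125, i.e. bytes/s -> Kbits/s): a user request of
 * 1 Gbit/s reaches the driver from the TC layer as 125,000,000 bytes/s;
 * div_u64(125000000, 125) == 1,000,000 Kbps == 1 Gbit/s, which is the unit
 * ice_set_bw_limit() expects.
 */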
/**
 * ice_create_q_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates a channel (VSI) using num_queues specified by the
 * user, and reconfigures RSS if needed.
 */
static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	if (!ch)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	if (!ch->num_txq || !ch->num_rxq) {
		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
		return -EINVAL;
	}

	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_txq);
		return -EINVAL;
	}

	if (!ice_setup_channel(pf, vsi, ch)) {
		dev_info(dev, "Failed to setup channel\n");
		return -EINVAL;
	}
	/* configure BW rate limit */
	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
		int ret;

		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (ret)
			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
		else
			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
	}

	vsi->cnt_q_avail -= ch->num_txq;

	return 0;
}

/**
 * ice_rem_all_chnl_fltrs - removes all channel filters
 * @pf: ptr to PF, TC-flower based filters are tracked at PF level
 *
 * Remove all advanced switch filters only if they are channel specific
 * tc-flower based filters
 */
static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	/* to remove all channel filters, iterate an ordered list of filters */
	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		struct ice_rule_query_data rule;
		int status;

		/* for now process only channel specific filters */
		if (!ice_is_chnl_fltr(fltr))
			continue;

		rule.rid = fltr->rid;
		rule.rule_id = fltr->rule_id;
		rule.vsi_handle = fltr->dest_id;
		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
		if (status) {
			if (status == -ENOENT)
				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
					rule.rule_id);
			else
				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
					status);
		} else if (fltr->dest_vsi) {
			/* update advanced switch filter count */
			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
				u32 flags = fltr->flags;

				fltr->dest_vsi->num_chnl_fltr--;
				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
					pf->num_dmac_chnl_fltrs--;
			}
		}

		hlist_del(&fltr->tc_flower_node);
		kfree(fltr);
	}
}
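/* Teardown ordering used by ice_remove_q_channels() below: advanced
 * (tc-flower) channel filters are removed first, then ntuple filters,
 * and only then are the channel VSIs themselves unconfigured and deleted,
 * so no filter can reference a VSI that is already gone.
 */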
/**
 * ice_remove_q_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 * @rem_fltr: delete advanced switch filter or not
 *
 * Remove queue channels for the TCs
 */
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
{
	struct ice_channel *ch, *ch_tmp;
	struct ice_pf *pf = vsi->back;
	int i;

	/* remove all tc-flower based filters if they are channel filters only */
	if (rem_fltr)
		ice_rem_all_chnl_fltrs(pf);

	/* remove ntuple filters since queue configuration is being changed */
	if (vsi->netdev->features & NETIF_F_NTUPLE) {
		struct ice_hw *hw = &pf->hw;

		mutex_lock(&hw->fdir_fltr_lock);
		ice_fdir_del_all_fltrs(vsi);
		mutex_unlock(&hw->fdir_fltr_lock);
	}

	/* perform cleanup for channels if they exist */
	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		list_del(&ch->list);
		ch_vsi = ch->ch_vsi;
		if (!ch_vsi) {
			kfree(ch);
			continue;
		}

		/* Reset queue contexts */
		for (i = 0; i < ch->num_rxq; i++) {
			struct ice_tx_ring *tx_ring;
			struct ice_rx_ring *rx_ring;

			tx_ring = vsi->tx_rings[ch->base_q + i];
			rx_ring = vsi->rx_rings[ch->base_q + i];
			if (tx_ring) {
				tx_ring->ch = NULL;
				if (tx_ring->q_vector)
					tx_ring->q_vector->ch = NULL;
			}
			if (rx_ring) {
				rx_ring->ch = NULL;
				if (rx_ring->q_vector)
					rx_ring->q_vector->ch = NULL;
			}
		}

		/* Release FD resources for the channel VSI */
		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);

		/* clear the VSI from scheduler tree */
		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);

		/* Delete VSI from FW */
		ice_vsi_delete(ch->ch_vsi);

		/* Delete VSI from PF and HW VSI arrays */
		ice_vsi_clear(ch->ch_vsi);

		/* free the channel */
		kfree(ch);
	}

	/* clear the channel VSI map which is stored in main VSI */
	ice_for_each_chnl_tc(i)
		vsi->tc_map_vsi[i] = NULL;

	/* reset main VSI's all TC information */
	vsi->all_enatc = 0;
	vsi->all_numtc = 0;
}
/**
 * ice_rebuild_channels - rebuild channel
 * @pf: ptr to PF
 *
 * Recreate channel VSIs and replay filters
 */
static int ice_rebuild_channels(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi;
	bool rem_adv_fltr = true;
	struct ice_channel *ch;
	struct ice_vsi *vsi;
	int tc_idx = 1;
	int i, err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return 0;

	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
	    main_vsi->old_numtc == 1)
		return 0; /* nothing to be done */

	/* reconfigure main VSI based on old value of TC and cached values
	 * for MQPRIO opts
	 */
	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
	if (err) {
		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
			main_vsi->old_ena_tc, main_vsi->vsi_num);
		return err;
	}

	/* rebuild ADQ VSIs */
	ice_for_each_vsi(pf, i) {
		enum ice_vsi_type type;

		vsi = pf->vsi[i];
		if (!vsi || vsi->type != ICE_VSI_CHNL)
			continue;

		type = vsi->type;

		/* rebuild ADQ VSI */
		err = ice_vsi_rebuild(vsi, true);
		if (err) {
			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
				ice_vsi_type_str(type), vsi->idx, err);
			goto cleanup;
		}

		/* Re-map HW VSI number, using the VSI handle that was
		 * previously validated by the ice_vsi_rebuild() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
				ice_vsi_type_str(type), err, vsi->idx);
			rem_adv_fltr = false;
			goto cleanup;
		}
		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
			 ice_vsi_type_str(type), vsi->idx);

		/* store ADQ VSI at correct TC index in main VSI's
		 * map of TC to VSI
		 */
		main_vsi->tc_map_vsi[tc_idx++] = vsi;
	}

	/* ADQ VSI(s) have been rebuilt successfully, so setup
	 * channel for main VSI's Tx and Rx rings
	 */
	list_for_each_entry(ch, &main_vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		ch_vsi = ch->ch_vsi;
		if (!ch_vsi)
			continue;

		/* reconfig channel resources */
		ice_cfg_chnl_all_res(main_vsi, ch);

		/* replay BW rate limit if it is non-zero */
		if (!ch->max_tx_rate && !ch->min_tx_rate)
			continue;

		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (err)
			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				err, ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
		else
			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
	}

	/* reconfig RSS for main VSI */
	if (main_vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(main_vsi);

	return 0;

cleanup:
	ice_remove_q_channels(main_vsi, rem_adv_fltr);
	return err;
}

/**
 * ice_create_q_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 */
static int ice_create_q_channels(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_channel *ch;
	int ret = 0, i;

	ice_for_each_chnl_tc(i) {
		if (!(vsi->all_enatc & BIT(i)))
			continue;

		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			ret = -ENOMEM;
			goto err_free;
		}
		INIT_LIST_HEAD(&ch->list);
		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];

		/* convert to Kbits/s */
		if (ch->max_tx_rate)
			ch->max_tx_rate = div_u64(ch->max_tx_rate,
						  ICE_BW_KBPS_DIVISOR);
		if (ch->min_tx_rate)
			ch->min_tx_rate = div_u64(ch->min_tx_rate,
						  ICE_BW_KBPS_DIVISOR);

		ret = ice_create_q_channel(vsi, ch);
		if (ret) {
			dev_err(ice_pf_to_dev(pf),
				"failed creating channel TC:%d\n", i);
			kfree(ch);
			goto err_free;
		}
		list_add_tail(&ch->list, &vsi->ch_list);
		vsi->tc_map_vsi[i] = ch->ch_vsi;
		dev_dbg(ice_pf_to_dev(pf),
			"successfully created channel: VSI %pK\n", ch->ch_vsi);
	}
	return 0;

err_free:
	ice_remove_q_channels(vsi, false);

	return ret;
}
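/* Worked example of the mapping built above (illustrative numbers): with
 * mqprio count[] = {4, 4} and offset[] = {0, 4}, TC1's channel gets
 * num_txq/num_rxq = 4 and base_q = 4, i.e. it owns queues 4-7 of the main
 * VSI, while TC0 traffic stays on queues 0-3 of the main VSI itself.
 */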
/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 */
static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 mode, ena_tc_qdisc = 0;
	int cur_txq, cur_rxq;
	u8 hw = 0, num_tcf;
	struct device *dev;
	int ret, i;

	dev = ice_pf_to_dev(pf);
	num_tcf = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		vsi->ch_rss_size = 0;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tcf;
	}

	/* Generate queue region map for number of TCF requested */
	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= BIT(i);

	switch (mode) {
	case TC_MQPRIO_MODE_CHANNEL:

		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret) {
			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
				   ret);
			return ret;
		}
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload
		 * is already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
		break;
	default:
		return -EINVAL;
	}

config_tcf:

	/* Requesting same TCF configuration as already enabled */
	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Pause VSI queues */
	ice_dis_vsi(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		ice_remove_q_channels(vsi, true);

		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
				     num_online_cpus());
		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
				     num_online_cpus());
	} else {
		/* logic to rebuild VSI, same like ethtool -L */
		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;

		for (i = 0; i < num_tcf; i++) {
			if (!(ena_tc_qdisc & BIT(i)))
				continue;

			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
		vsi->req_txq = offset + qcount_tx;
		vsi->req_rxq = offset + qcount_rx;

		/* store away original rss_size info, so that it gets reused
		 * from ice_vsi_rebuild during the tc-qdisc delete stage to
		 * determine what the rss_size should be for the main VSI
		 */
		vsi->orig_rss_size = vsi->rss_size;
	}

	/* save current values of Tx and Rx queues before calling VSI rebuild
	 * for fallback option
	 */
	cur_txq = vsi->num_txq;
	cur_rxq = vsi->num_rxq;

	/* proceed with rebuild main VSI using correct number of queues */
	ret = ice_vsi_rebuild(vsi, false);
	if (ret) {
		/* fallback to current number of queues */
		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
		vsi->req_txq = cur_txq;
		vsi->req_rxq = cur_rxq;
		clear_bit(ICE_RESET_FAILED, pf->state);
		if (ice_vsi_rebuild(vsi, false)) {
			dev_err(dev, "Rebuild of main VSI failed again\n");
			return ret;
		}
	}

	vsi->all_numtc = num_tcf;
	vsi->all_enatc = ena_tc_qdisc;
	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
	if (ret) {
		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
			   vsi->vsi_num);
		goto exit;
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

		/* set TC0 rate limit if specified */
		if (max_tx_rate || min_tx_rate) {
			/* convert to Kbits/s */
			if (max_tx_rate)
				max_tx_rate = div_u64(max_tx_rate,
						      ICE_BW_KBPS_DIVISOR);
			if (min_tx_rate)
				min_tx_rate = div_u64(min_tx_rate,
						      ICE_BW_KBPS_DIVISOR);

			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
			if (!ret) {
				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
			} else {
				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
				goto exit;
			}
		}
		ret = ice_create_q_channels(vsi);
		if (ret) {
			netdev_err(netdev, "failed configuring queue channels\n");
			goto exit;
		} else {
			netdev_dbg(netdev, "successfully configured channels\n");
		}
	}

	if (vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(vsi);

exit:
	/* if error, reset the all_numtc and all_enatc */
	if (ret) {
		vsi->all_numtc = 0;
		vsi->all_enatc = 0;
	}
	/* resume VSI */
	ice_ena_vsi(vsi, true);

	return ret;
}
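/* Illustrative teardown path for the !hw branch above: deleting the qdisc
 * from user space, e.g.
 *
 *   tc qdisc del dev <pf-netdev> root
 *
 * arrives with qopt.hw == 0, clears ICE_FLAG_TC_MQPRIO, removes the queue
 * channels, and sizes the main VSI back to one queue pair per online CPU.
 */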
static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);
		return err;
	default:
		return -EOPNOTSUPP;
	}
	return -EOPNOTSUPP;
}

static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}
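/* The indirect block machinery below lets filters installed on *other*
 * net devices (supported tunnel devices, or VLANs stacked on this PF) be
 * offloaded here. An illustrative trigger (device names are placeholders):
 *
 *   ip link add vxlan0 type vxlan id 42 dstport 4789 dev <pf-netdev>
 *   tc filter add dev vxlan0 ingress flower enc_key_id 42 ... skip_sw ...
 *
 * Such a filter lands in ice_indr_setup_tc_block()/ice_indr_setup_block_cb()
 * rather than coming in through ice_setup_tc().
 */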
static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb = flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						    indr_priv, indr_priv,
						    ice_rep_indr_tc_block_unbind,
						    f, netdev, sch, data, np,
						    cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);

	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}
/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be used directly except
 * by ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
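/* Worked example for the header checks above (illustrative, standard VXLAN
 * over IPv4): outer MAC 14 bytes -> network offset 14 (even, within the MAC
 * length limit); outer IPv4 header 20 bytes (even, within the IP length
 * limit); UDP 8 + VXLAN 8 + inner MAC 14 bytes between the transport and
 * inner network headers (30, even); inner IPv4 header 20 bytes. Such a
 * frame keeps its checksum and GSO features.
 */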
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};