1837f08fdSAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0 2837f08fdSAnirudh Venkataramanan /* Copyright (c) 2018, Intel Corporation. */ 3837f08fdSAnirudh Venkataramanan 4837f08fdSAnirudh Venkataramanan /* Intel(R) Ethernet Connection E800 Series Linux Driver */ 5837f08fdSAnirudh Venkataramanan 6837f08fdSAnirudh Venkataramanan #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7837f08fdSAnirudh Venkataramanan 8837f08fdSAnirudh Venkataramanan #include "ice.h" 9eff380aaSAnirudh Venkataramanan #include "ice_base.h" 1045d3d428SAnirudh Venkataramanan #include "ice_lib.h" 1137b6f646SAnirudh Venkataramanan #include "ice_dcb_lib.h" 12b94b013eSDave Ertman #include "ice_dcb_nl.h" 13837f08fdSAnirudh Venkataramanan 14e3710a01SPaul M Stillwell Jr #define DRV_VERSION_MAJOR 0 152de12566STony Nguyen #define DRV_VERSION_MINOR 8 1618a8d358STony Nguyen #define DRV_VERSION_BUILD 2 17e3710a01SPaul M Stillwell Jr 18e3710a01SPaul M Stillwell Jr #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 19e3710a01SPaul M Stillwell Jr __stringify(DRV_VERSION_MINOR) "." \ 20e3710a01SPaul M Stillwell Jr __stringify(DRV_VERSION_BUILD) "-k" 21837f08fdSAnirudh Venkataramanan #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" 22fcea6f3dSAnirudh Venkataramanan const char ice_drv_ver[] = DRV_VERSION; 23837f08fdSAnirudh Venkataramanan static const char ice_driver_string[] = DRV_SUMMARY; 24837f08fdSAnirudh Venkataramanan static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; 25837f08fdSAnirudh Venkataramanan 26462acf6aSTony Nguyen /* DDP Package file located in firmware search paths (e.g. 
/lib/firmware/) */ 27462acf6aSTony Nguyen #define ICE_DDP_PKG_PATH "intel/ice/ddp/" 28462acf6aSTony Nguyen #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" 29462acf6aSTony Nguyen 30837f08fdSAnirudh Venkataramanan MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 31837f08fdSAnirudh Venkataramanan MODULE_DESCRIPTION(DRV_SUMMARY); 3298674ebeSJesse Brandeburg MODULE_LICENSE("GPL v2"); 33837f08fdSAnirudh Venkataramanan MODULE_VERSION(DRV_VERSION); 34462acf6aSTony Nguyen MODULE_FIRMWARE(ICE_DDP_PKG_FILE); 35837f08fdSAnirudh Venkataramanan 36837f08fdSAnirudh Venkataramanan static int debug = -1; 37837f08fdSAnirudh Venkataramanan module_param(debug, int, 0644); 387ec59eeaSAnirudh Venkataramanan #ifndef CONFIG_DYNAMIC_DEBUG 397ec59eeaSAnirudh Venkataramanan MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)"); 407ec59eeaSAnirudh Venkataramanan #else 417ec59eeaSAnirudh Venkataramanan MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); 427ec59eeaSAnirudh Venkataramanan #endif /* !CONFIG_DYNAMIC_DEBUG */ 43837f08fdSAnirudh Venkataramanan 44940b61afSAnirudh Venkataramanan static struct workqueue_struct *ice_wq; 45462acf6aSTony Nguyen static const struct net_device_ops ice_netdev_safe_mode_ops; 46cdedef59SAnirudh Venkataramanan static const struct net_device_ops ice_netdev_ops; 4787324e74SHenry Tieman static int ice_vsi_open(struct ice_vsi *vsi); 48940b61afSAnirudh Venkataramanan 49462acf6aSTony Nguyen static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); 5028c2a645SAnirudh Venkataramanan 510f9d5027SAnirudh Venkataramanan static void ice_vsi_release_all(struct ice_pf *pf); 523a858ba3SAnirudh Venkataramanan 533a858ba3SAnirudh Venkataramanan /** 54b3969fd7SSudheer Mogilappagari * ice_get_tx_pending - returns number of Tx descriptors not processed 55b3969fd7SSudheer Mogilappagari * @ring: the ring of descriptors 56b3969fd7SSudheer Mogilappagari */ 57c1ddf1f5SBrett Creeley static u16 
ice_get_tx_pending(struct ice_ring *ring) 58b3969fd7SSudheer Mogilappagari { 59c1ddf1f5SBrett Creeley u16 head, tail; 60b3969fd7SSudheer Mogilappagari 61b3969fd7SSudheer Mogilappagari head = ring->next_to_clean; 62c1ddf1f5SBrett Creeley tail = ring->next_to_use; 63b3969fd7SSudheer Mogilappagari 64b3969fd7SSudheer Mogilappagari if (head != tail) 65b3969fd7SSudheer Mogilappagari return (head < tail) ? 66b3969fd7SSudheer Mogilappagari tail - head : (tail + ring->count - head); 67b3969fd7SSudheer Mogilappagari return 0; 68b3969fd7SSudheer Mogilappagari } 69b3969fd7SSudheer Mogilappagari 70b3969fd7SSudheer Mogilappagari /** 71b3969fd7SSudheer Mogilappagari * ice_check_for_hang_subtask - check for and recover hung queues 72b3969fd7SSudheer Mogilappagari * @pf: pointer to PF struct 73b3969fd7SSudheer Mogilappagari */ 74b3969fd7SSudheer Mogilappagari static void ice_check_for_hang_subtask(struct ice_pf *pf) 75b3969fd7SSudheer Mogilappagari { 76b3969fd7SSudheer Mogilappagari struct ice_vsi *vsi = NULL; 77e89e899fSBrett Creeley struct ice_hw *hw; 78b3969fd7SSudheer Mogilappagari unsigned int i; 79b3969fd7SSudheer Mogilappagari int packets; 80e89e899fSBrett Creeley u32 v; 81b3969fd7SSudheer Mogilappagari 82b3969fd7SSudheer Mogilappagari ice_for_each_vsi(pf, v) 83b3969fd7SSudheer Mogilappagari if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { 84b3969fd7SSudheer Mogilappagari vsi = pf->vsi[v]; 85b3969fd7SSudheer Mogilappagari break; 86b3969fd7SSudheer Mogilappagari } 87b3969fd7SSudheer Mogilappagari 88b3969fd7SSudheer Mogilappagari if (!vsi || test_bit(__ICE_DOWN, vsi->state)) 89b3969fd7SSudheer Mogilappagari return; 90b3969fd7SSudheer Mogilappagari 91b3969fd7SSudheer Mogilappagari if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) 92b3969fd7SSudheer Mogilappagari return; 93b3969fd7SSudheer Mogilappagari 94e89e899fSBrett Creeley hw = &vsi->back->hw; 95e89e899fSBrett Creeley 96b3969fd7SSudheer Mogilappagari for (i = 0; i < vsi->num_txq; i++) { 97b3969fd7SSudheer 
Mogilappagari struct ice_ring *tx_ring = vsi->tx_rings[i]; 98b3969fd7SSudheer Mogilappagari 99b3969fd7SSudheer Mogilappagari if (tx_ring && tx_ring->desc) { 100b3969fd7SSudheer Mogilappagari /* If packet counter has not changed the queue is 101b3969fd7SSudheer Mogilappagari * likely stalled, so force an interrupt for this 102b3969fd7SSudheer Mogilappagari * queue. 103b3969fd7SSudheer Mogilappagari * 104b3969fd7SSudheer Mogilappagari * prev_pkt would be negative if there was no 105b3969fd7SSudheer Mogilappagari * pending work. 106b3969fd7SSudheer Mogilappagari */ 107b3969fd7SSudheer Mogilappagari packets = tx_ring->stats.pkts & INT_MAX; 108b3969fd7SSudheer Mogilappagari if (tx_ring->tx_stats.prev_pkt == packets) { 109b3969fd7SSudheer Mogilappagari /* Trigger sw interrupt to revive the queue */ 110e89e899fSBrett Creeley ice_trigger_sw_intr(hw, tx_ring->q_vector); 111b3969fd7SSudheer Mogilappagari continue; 112b3969fd7SSudheer Mogilappagari } 113b3969fd7SSudheer Mogilappagari 114b3969fd7SSudheer Mogilappagari /* Memory barrier between read of packet count and call 115b3969fd7SSudheer Mogilappagari * to ice_get_tx_pending() 116b3969fd7SSudheer Mogilappagari */ 117b3969fd7SSudheer Mogilappagari smp_rmb(); 118b3969fd7SSudheer Mogilappagari tx_ring->tx_stats.prev_pkt = 119b3969fd7SSudheer Mogilappagari ice_get_tx_pending(tx_ring) ? packets : -1; 120b3969fd7SSudheer Mogilappagari } 121b3969fd7SSudheer Mogilappagari } 122b3969fd7SSudheer Mogilappagari } 123b3969fd7SSudheer Mogilappagari 124b3969fd7SSudheer Mogilappagari /** 125561f4379STony Nguyen * ice_init_mac_fltr - Set initial MAC filters 126561f4379STony Nguyen * @pf: board private structure 127561f4379STony Nguyen * 1282f2da36eSAnirudh Venkataramanan * Set initial set of MAC filters for PF VSI; configure filters for permanent 129561f4379STony Nguyen * address and broadcast address. If an error is encountered, netdevice will be 130561f4379STony Nguyen * unregistered. 
131561f4379STony Nguyen */ 132561f4379STony Nguyen static int ice_init_mac_fltr(struct ice_pf *pf) 133561f4379STony Nguyen { 134bbb968e8SAkeem G Abodunrin enum ice_status status; 135561f4379STony Nguyen u8 broadcast[ETH_ALEN]; 136561f4379STony Nguyen struct ice_vsi *vsi; 137561f4379STony Nguyen 138208ff751SAnirudh Venkataramanan vsi = ice_get_main_vsi(pf); 139561f4379STony Nguyen if (!vsi) 140561f4379STony Nguyen return -EINVAL; 141561f4379STony Nguyen 142561f4379STony Nguyen /* To add a MAC filter, first add the MAC to a list and then 143561f4379STony Nguyen * pass the list to ice_add_mac. 144561f4379STony Nguyen */ 145561f4379STony Nguyen 146561f4379STony Nguyen /* Add a unicast MAC filter so the VSI can get its packets */ 147bbb968e8SAkeem G Abodunrin status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true); 148561f4379STony Nguyen if (status) 149561f4379STony Nguyen goto unregister; 150561f4379STony Nguyen 151561f4379STony Nguyen /* VSI needs to receive broadcast traffic, so add the broadcast 152561f4379STony Nguyen * MAC address to the list as well. 153561f4379STony Nguyen */ 154561f4379STony Nguyen eth_broadcast_addr(broadcast); 155bbb968e8SAkeem G Abodunrin status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true); 156561f4379STony Nguyen if (status) 157bbb968e8SAkeem G Abodunrin goto unregister; 158561f4379STony Nguyen 159bbb968e8SAkeem G Abodunrin return 0; 160561f4379STony Nguyen unregister: 161561f4379STony Nguyen /* We aren't useful with no MAC filters, so unregister if we 162561f4379STony Nguyen * had an error 163561f4379STony Nguyen */ 164561f4379STony Nguyen if (status && vsi->netdev->reg_state == NETREG_REGISTERED) { 16519cce2c6SAnirudh Venkataramanan dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. 
Unregistering device\n", 166561f4379STony Nguyen status); 167561f4379STony Nguyen unregister_netdev(vsi->netdev); 168561f4379STony Nguyen free_netdev(vsi->netdev); 169561f4379STony Nguyen vsi->netdev = NULL; 170561f4379STony Nguyen } 171561f4379STony Nguyen 172bbb968e8SAkeem G Abodunrin return -EIO; 173561f4379STony Nguyen } 174561f4379STony Nguyen 175561f4379STony Nguyen /** 176f9867df6SAnirudh Venkataramanan * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced 177e94d4478SAnirudh Venkataramanan * @netdev: the net device on which the sync is happening 178f9867df6SAnirudh Venkataramanan * @addr: MAC address to sync 179e94d4478SAnirudh Venkataramanan * 180e94d4478SAnirudh Venkataramanan * This is a callback function which is called by the in kernel device sync 181e94d4478SAnirudh Venkataramanan * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only 182e94d4478SAnirudh Venkataramanan * populates the tmp_sync_list, which is later used by ice_add_mac to add the 183f9867df6SAnirudh Venkataramanan * MAC filters from the hardware. 
184e94d4478SAnirudh Venkataramanan */ 185e94d4478SAnirudh Venkataramanan static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) 186e94d4478SAnirudh Venkataramanan { 187e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 188e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 189e94d4478SAnirudh Venkataramanan 190e94d4478SAnirudh Venkataramanan if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr)) 191e94d4478SAnirudh Venkataramanan return -EINVAL; 192e94d4478SAnirudh Venkataramanan 193e94d4478SAnirudh Venkataramanan return 0; 194e94d4478SAnirudh Venkataramanan } 195e94d4478SAnirudh Venkataramanan 196e94d4478SAnirudh Venkataramanan /** 197f9867df6SAnirudh Venkataramanan * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced 198e94d4478SAnirudh Venkataramanan * @netdev: the net device on which the unsync is happening 199f9867df6SAnirudh Venkataramanan * @addr: MAC address to unsync 200e94d4478SAnirudh Venkataramanan * 201e94d4478SAnirudh Venkataramanan * This is a callback function which is called by the in kernel device unsync 202e94d4478SAnirudh Venkataramanan * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only 203e94d4478SAnirudh Venkataramanan * populates the tmp_unsync_list, which is later used by ice_remove_mac to 204f9867df6SAnirudh Venkataramanan * delete the MAC filters from the hardware. 
205e94d4478SAnirudh Venkataramanan */ 206e94d4478SAnirudh Venkataramanan static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) 207e94d4478SAnirudh Venkataramanan { 208e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 209e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 210e94d4478SAnirudh Venkataramanan 211e94d4478SAnirudh Venkataramanan if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr)) 212e94d4478SAnirudh Venkataramanan return -EINVAL; 213e94d4478SAnirudh Venkataramanan 214e94d4478SAnirudh Venkataramanan return 0; 215e94d4478SAnirudh Venkataramanan } 216e94d4478SAnirudh Venkataramanan 217e94d4478SAnirudh Venkataramanan /** 218e94d4478SAnirudh Venkataramanan * ice_vsi_fltr_changed - check if filter state changed 219e94d4478SAnirudh Venkataramanan * @vsi: VSI to be checked 220e94d4478SAnirudh Venkataramanan * 221e94d4478SAnirudh Venkataramanan * returns true if filter state has changed, false otherwise. 222e94d4478SAnirudh Venkataramanan */ 223e94d4478SAnirudh Venkataramanan static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) 224e94d4478SAnirudh Venkataramanan { 225e94d4478SAnirudh Venkataramanan return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) || 226e94d4478SAnirudh Venkataramanan test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) || 227e94d4478SAnirudh Venkataramanan test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); 228e94d4478SAnirudh Venkataramanan } 229e94d4478SAnirudh Venkataramanan 230e94d4478SAnirudh Venkataramanan /** 2315eda8afdSAkeem G Abodunrin * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF 2325eda8afdSAkeem G Abodunrin * @vsi: the VSI being configured 2335eda8afdSAkeem G Abodunrin * @promisc_m: mask of promiscuous config bits 2345eda8afdSAkeem G Abodunrin * @set_promisc: enable or disable promisc flag request 2355eda8afdSAkeem G Abodunrin * 2365eda8afdSAkeem G Abodunrin */ 2375eda8afdSAkeem G Abodunrin static int 
ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) 2385eda8afdSAkeem G Abodunrin { 2395eda8afdSAkeem G Abodunrin struct ice_hw *hw = &vsi->back->hw; 2405eda8afdSAkeem G Abodunrin enum ice_status status = 0; 2415eda8afdSAkeem G Abodunrin 2425eda8afdSAkeem G Abodunrin if (vsi->type != ICE_VSI_PF) 2435eda8afdSAkeem G Abodunrin return 0; 2445eda8afdSAkeem G Abodunrin 2455eda8afdSAkeem G Abodunrin if (vsi->vlan_ena) { 2465eda8afdSAkeem G Abodunrin status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, 2475eda8afdSAkeem G Abodunrin set_promisc); 2485eda8afdSAkeem G Abodunrin } else { 2495eda8afdSAkeem G Abodunrin if (set_promisc) 2505eda8afdSAkeem G Abodunrin status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, 2515eda8afdSAkeem G Abodunrin 0); 2525eda8afdSAkeem G Abodunrin else 2535eda8afdSAkeem G Abodunrin status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, 2545eda8afdSAkeem G Abodunrin 0); 2555eda8afdSAkeem G Abodunrin } 2565eda8afdSAkeem G Abodunrin 2575eda8afdSAkeem G Abodunrin if (status) 2585eda8afdSAkeem G Abodunrin return -EIO; 2595eda8afdSAkeem G Abodunrin 2605eda8afdSAkeem G Abodunrin return 0; 2615eda8afdSAkeem G Abodunrin } 2625eda8afdSAkeem G Abodunrin 2635eda8afdSAkeem G Abodunrin /** 264e94d4478SAnirudh Venkataramanan * ice_vsi_sync_fltr - Update the VSI filter list to the HW 265e94d4478SAnirudh Venkataramanan * @vsi: ptr to the VSI 266e94d4478SAnirudh Venkataramanan * 267e94d4478SAnirudh Venkataramanan * Push any outstanding VSI filter changes through the AdminQ. 
268e94d4478SAnirudh Venkataramanan */ 269e94d4478SAnirudh Venkataramanan static int ice_vsi_sync_fltr(struct ice_vsi *vsi) 270e94d4478SAnirudh Venkataramanan { 2719a946843SAnirudh Venkataramanan struct device *dev = ice_pf_to_dev(vsi->back); 272e94d4478SAnirudh Venkataramanan struct net_device *netdev = vsi->netdev; 273e94d4478SAnirudh Venkataramanan bool promisc_forced_on = false; 274e94d4478SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 275e94d4478SAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 276e94d4478SAnirudh Venkataramanan enum ice_status status = 0; 277e94d4478SAnirudh Venkataramanan u32 changed_flags = 0; 2785eda8afdSAkeem G Abodunrin u8 promisc_m; 279e94d4478SAnirudh Venkataramanan int err = 0; 280e94d4478SAnirudh Venkataramanan 281e94d4478SAnirudh Venkataramanan if (!vsi->netdev) 282e94d4478SAnirudh Venkataramanan return -EINVAL; 283e94d4478SAnirudh Venkataramanan 284e94d4478SAnirudh Venkataramanan while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) 285e94d4478SAnirudh Venkataramanan usleep_range(1000, 2000); 286e94d4478SAnirudh Venkataramanan 287e94d4478SAnirudh Venkataramanan changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 288e94d4478SAnirudh Venkataramanan vsi->current_netdev_flags = vsi->netdev->flags; 289e94d4478SAnirudh Venkataramanan 290e94d4478SAnirudh Venkataramanan INIT_LIST_HEAD(&vsi->tmp_sync_list); 291e94d4478SAnirudh Venkataramanan INIT_LIST_HEAD(&vsi->tmp_unsync_list); 292e94d4478SAnirudh Venkataramanan 293e94d4478SAnirudh Venkataramanan if (ice_vsi_fltr_changed(vsi)) { 294e94d4478SAnirudh Venkataramanan clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 295e94d4478SAnirudh Venkataramanan clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 296e94d4478SAnirudh Venkataramanan clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); 297e94d4478SAnirudh Venkataramanan 298e94d4478SAnirudh Venkataramanan /* grab the netdev's addr_list_lock */ 299e94d4478SAnirudh Venkataramanan netif_addr_lock_bh(netdev); 
300e94d4478SAnirudh Venkataramanan __dev_uc_sync(netdev, ice_add_mac_to_sync_list, 301e94d4478SAnirudh Venkataramanan ice_add_mac_to_unsync_list); 302e94d4478SAnirudh Venkataramanan __dev_mc_sync(netdev, ice_add_mac_to_sync_list, 303e94d4478SAnirudh Venkataramanan ice_add_mac_to_unsync_list); 304e94d4478SAnirudh Venkataramanan /* our temp lists are populated. release lock */ 305e94d4478SAnirudh Venkataramanan netif_addr_unlock_bh(netdev); 306e94d4478SAnirudh Venkataramanan } 307e94d4478SAnirudh Venkataramanan 308f9867df6SAnirudh Venkataramanan /* Remove MAC addresses in the unsync list */ 309e94d4478SAnirudh Venkataramanan status = ice_remove_mac(hw, &vsi->tmp_unsync_list); 310e94d4478SAnirudh Venkataramanan ice_free_fltr_list(dev, &vsi->tmp_unsync_list); 311e94d4478SAnirudh Venkataramanan if (status) { 312e94d4478SAnirudh Venkataramanan netdev_err(netdev, "Failed to delete MAC filters\n"); 313e94d4478SAnirudh Venkataramanan /* if we failed because of alloc failures, just bail */ 314e94d4478SAnirudh Venkataramanan if (status == ICE_ERR_NO_MEMORY) { 315e94d4478SAnirudh Venkataramanan err = -ENOMEM; 316e94d4478SAnirudh Venkataramanan goto out; 317e94d4478SAnirudh Venkataramanan } 318e94d4478SAnirudh Venkataramanan } 319e94d4478SAnirudh Venkataramanan 320f9867df6SAnirudh Venkataramanan /* Add MAC addresses in the sync list */ 321e94d4478SAnirudh Venkataramanan status = ice_add_mac(hw, &vsi->tmp_sync_list); 322e94d4478SAnirudh Venkataramanan ice_free_fltr_list(dev, &vsi->tmp_sync_list); 32389f3e4a5SPreethi Banala /* If filter is added successfully or already exists, do not go into 32489f3e4a5SPreethi Banala * 'if' condition and report it as error. Instead continue processing 32589f3e4a5SPreethi Banala * rest of the function. 
32689f3e4a5SPreethi Banala */ 32789f3e4a5SPreethi Banala if (status && status != ICE_ERR_ALREADY_EXISTS) { 328e94d4478SAnirudh Venkataramanan netdev_err(netdev, "Failed to add MAC filters\n"); 329f9867df6SAnirudh Venkataramanan /* If there is no more space for new umac filters, VSI 330e94d4478SAnirudh Venkataramanan * should go into promiscuous mode. There should be some 331e94d4478SAnirudh Venkataramanan * space reserved for promiscuous filters. 332e94d4478SAnirudh Venkataramanan */ 333e94d4478SAnirudh Venkataramanan if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && 334e94d4478SAnirudh Venkataramanan !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC, 335e94d4478SAnirudh Venkataramanan vsi->state)) { 336e94d4478SAnirudh Venkataramanan promisc_forced_on = true; 33719cce2c6SAnirudh Venkataramanan netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", 338e94d4478SAnirudh Venkataramanan vsi->vsi_num); 339e94d4478SAnirudh Venkataramanan } else { 340e94d4478SAnirudh Venkataramanan err = -EIO; 341e94d4478SAnirudh Venkataramanan goto out; 342e94d4478SAnirudh Venkataramanan } 343e94d4478SAnirudh Venkataramanan } 344e94d4478SAnirudh Venkataramanan /* check for changes in promiscuous modes */ 3455eda8afdSAkeem G Abodunrin if (changed_flags & IFF_ALLMULTI) { 3465eda8afdSAkeem G Abodunrin if (vsi->current_netdev_flags & IFF_ALLMULTI) { 3475eda8afdSAkeem G Abodunrin if (vsi->vlan_ena) 3485eda8afdSAkeem G Abodunrin promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; 3495eda8afdSAkeem G Abodunrin else 3505eda8afdSAkeem G Abodunrin promisc_m = ICE_MCAST_PROMISC_BITS; 3515eda8afdSAkeem G Abodunrin 3525eda8afdSAkeem G Abodunrin err = ice_cfg_promisc(vsi, promisc_m, true); 3535eda8afdSAkeem G Abodunrin if (err) { 3545eda8afdSAkeem G Abodunrin netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", 3555eda8afdSAkeem G Abodunrin vsi->vsi_num); 3565eda8afdSAkeem G Abodunrin vsi->current_netdev_flags &= ~IFF_ALLMULTI; 3575eda8afdSAkeem G Abodunrin goto 
out_promisc; 3585eda8afdSAkeem G Abodunrin } 3595eda8afdSAkeem G Abodunrin } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { 3605eda8afdSAkeem G Abodunrin if (vsi->vlan_ena) 3615eda8afdSAkeem G Abodunrin promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; 3625eda8afdSAkeem G Abodunrin else 3635eda8afdSAkeem G Abodunrin promisc_m = ICE_MCAST_PROMISC_BITS; 3645eda8afdSAkeem G Abodunrin 3655eda8afdSAkeem G Abodunrin err = ice_cfg_promisc(vsi, promisc_m, false); 3665eda8afdSAkeem G Abodunrin if (err) { 3675eda8afdSAkeem G Abodunrin netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", 3685eda8afdSAkeem G Abodunrin vsi->vsi_num); 3695eda8afdSAkeem G Abodunrin vsi->current_netdev_flags |= IFF_ALLMULTI; 3705eda8afdSAkeem G Abodunrin goto out_promisc; 3715eda8afdSAkeem G Abodunrin } 3725eda8afdSAkeem G Abodunrin } 3735eda8afdSAkeem G Abodunrin } 374e94d4478SAnirudh Venkataramanan 375e94d4478SAnirudh Venkataramanan if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || 376e94d4478SAnirudh Venkataramanan test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) { 377e94d4478SAnirudh Venkataramanan clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); 378e94d4478SAnirudh Venkataramanan if (vsi->current_netdev_flags & IFF_PROMISC) { 379f9867df6SAnirudh Venkataramanan /* Apply Rx filter rule to get traffic from wire */ 380fc0f39bcSBrett Creeley if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { 381fc0f39bcSBrett Creeley err = ice_set_dflt_vsi(pf->first_sw, vsi); 382fc0f39bcSBrett Creeley if (err && err != -EEXIST) { 38319cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n", 384fc0f39bcSBrett Creeley err, vsi->vsi_num); 385fc0f39bcSBrett Creeley vsi->current_netdev_flags &= 386fc0f39bcSBrett Creeley ~IFF_PROMISC; 387e94d4478SAnirudh Venkataramanan goto out_promisc; 388e94d4478SAnirudh Venkataramanan } 389fc0f39bcSBrett Creeley } 390e94d4478SAnirudh Venkataramanan } else { 391f9867df6SAnirudh Venkataramanan /* Clear Rx filter to 
remove traffic from wire */ 392fc0f39bcSBrett Creeley if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) { 393fc0f39bcSBrett Creeley err = ice_clear_dflt_vsi(pf->first_sw); 394fc0f39bcSBrett Creeley if (err) { 39519cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n", 396fc0f39bcSBrett Creeley err, vsi->vsi_num); 397fc0f39bcSBrett Creeley vsi->current_netdev_flags |= 398fc0f39bcSBrett Creeley IFF_PROMISC; 399e94d4478SAnirudh Venkataramanan goto out_promisc; 400e94d4478SAnirudh Venkataramanan } 401e94d4478SAnirudh Venkataramanan } 402e94d4478SAnirudh Venkataramanan } 403fc0f39bcSBrett Creeley } 404e94d4478SAnirudh Venkataramanan goto exit; 405e94d4478SAnirudh Venkataramanan 406e94d4478SAnirudh Venkataramanan out_promisc: 407e94d4478SAnirudh Venkataramanan set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); 408e94d4478SAnirudh Venkataramanan goto exit; 409e94d4478SAnirudh Venkataramanan out: 410e94d4478SAnirudh Venkataramanan /* if something went wrong then set the changed flag so we try again */ 411e94d4478SAnirudh Venkataramanan set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 412e94d4478SAnirudh Venkataramanan set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 413e94d4478SAnirudh Venkataramanan exit: 414e94d4478SAnirudh Venkataramanan clear_bit(__ICE_CFG_BUSY, vsi->state); 415e94d4478SAnirudh Venkataramanan return err; 416e94d4478SAnirudh Venkataramanan } 417e94d4478SAnirudh Venkataramanan 418e94d4478SAnirudh Venkataramanan /** 419e94d4478SAnirudh Venkataramanan * ice_sync_fltr_subtask - Sync the VSI filter list with HW 420e94d4478SAnirudh Venkataramanan * @pf: board private structure 421e94d4478SAnirudh Venkataramanan */ 422e94d4478SAnirudh Venkataramanan static void ice_sync_fltr_subtask(struct ice_pf *pf) 423e94d4478SAnirudh Venkataramanan { 424e94d4478SAnirudh Venkataramanan int v; 425e94d4478SAnirudh Venkataramanan 426e94d4478SAnirudh Venkataramanan if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) 
427e94d4478SAnirudh Venkataramanan return; 428e94d4478SAnirudh Venkataramanan 429e94d4478SAnirudh Venkataramanan clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 430e94d4478SAnirudh Venkataramanan 43180ed404aSBrett Creeley ice_for_each_vsi(pf, v) 432e94d4478SAnirudh Venkataramanan if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && 433e94d4478SAnirudh Venkataramanan ice_vsi_sync_fltr(pf->vsi[v])) { 434e94d4478SAnirudh Venkataramanan /* come back and try again later */ 435e94d4478SAnirudh Venkataramanan set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 436e94d4478SAnirudh Venkataramanan break; 437e94d4478SAnirudh Venkataramanan } 438e94d4478SAnirudh Venkataramanan } 439e94d4478SAnirudh Venkataramanan 440e94d4478SAnirudh Venkataramanan /** 4417b9ffc76SAnirudh Venkataramanan * ice_pf_dis_all_vsi - Pause all VSIs on a PF 4427b9ffc76SAnirudh Venkataramanan * @pf: the PF 4437b9ffc76SAnirudh Venkataramanan * @locked: is the rtnl_lock already held 4447b9ffc76SAnirudh Venkataramanan */ 4457b9ffc76SAnirudh Venkataramanan static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) 4467b9ffc76SAnirudh Venkataramanan { 4477b9ffc76SAnirudh Venkataramanan int v; 4487b9ffc76SAnirudh Venkataramanan 4497b9ffc76SAnirudh Venkataramanan ice_for_each_vsi(pf, v) 4507b9ffc76SAnirudh Venkataramanan if (pf->vsi[v]) 4517b9ffc76SAnirudh Venkataramanan ice_dis_vsi(pf->vsi[v], locked); 4527b9ffc76SAnirudh Venkataramanan } 4537b9ffc76SAnirudh Venkataramanan 4547b9ffc76SAnirudh Venkataramanan /** 4550b28b702SAnirudh Venkataramanan * ice_prepare_for_reset - prep for the core to reset 4560b28b702SAnirudh Venkataramanan * @pf: board private structure 4570b28b702SAnirudh Venkataramanan * 4580b28b702SAnirudh Venkataramanan * Inform or close all dependent features in prep for reset. 
4590b28b702SAnirudh Venkataramanan */ 4600b28b702SAnirudh Venkataramanan static void 4610b28b702SAnirudh Venkataramanan ice_prepare_for_reset(struct ice_pf *pf) 4620b28b702SAnirudh Venkataramanan { 4630b28b702SAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 464a1199d67SColin Ian King int i; 4650b28b702SAnirudh Venkataramanan 4665abac9d7SBrett Creeley /* already prepared for reset */ 4675abac9d7SBrett Creeley if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) 4685abac9d7SBrett Creeley return; 4695abac9d7SBrett Creeley 470007676b4SAnirudh Venkataramanan /* Notify VFs of impending reset */ 471007676b4SAnirudh Venkataramanan if (ice_check_sq_alive(hw, &hw->mailboxq)) 472007676b4SAnirudh Venkataramanan ice_vc_notify_reset(pf); 473007676b4SAnirudh Venkataramanan 474c7aeb4d1SAkeem G Abodunrin /* Disable VFs until reset is completed */ 475005881bcSBrett Creeley ice_for_each_vf(pf, i) 47677ca27c4SPaul Greenwalt ice_set_vf_state_qs_dis(&pf->vf[i]); 477c7aeb4d1SAkeem G Abodunrin 478462acf6aSTony Nguyen /* clear SW filtering DB */ 479462acf6aSTony Nguyen ice_clear_hw_tbls(hw); 4800b28b702SAnirudh Venkataramanan /* disable the VSIs and their queues that are not already DOWN */ 4817b9ffc76SAnirudh Venkataramanan ice_pf_dis_all_vsi(pf, false); 4820b28b702SAnirudh Venkataramanan 483c5a2a4a3SUsha Ketineni if (hw->port_info) 484c5a2a4a3SUsha Ketineni ice_sched_clear_port(hw->port_info); 485c5a2a4a3SUsha Ketineni 4860b28b702SAnirudh Venkataramanan ice_shutdown_all_ctrlq(hw); 4870f9d5027SAnirudh Venkataramanan 4880f9d5027SAnirudh Venkataramanan set_bit(__ICE_PREPARED_FOR_RESET, pf->state); 4890b28b702SAnirudh Venkataramanan } 4900b28b702SAnirudh Venkataramanan 4910b28b702SAnirudh Venkataramanan /** 4920b28b702SAnirudh Venkataramanan * ice_do_reset - Initiate one of many types of resets 4930b28b702SAnirudh Venkataramanan * @pf: board private structure 4940b28b702SAnirudh Venkataramanan * @reset_type: reset type requested 4950b28b702SAnirudh Venkataramanan * before this function was 
 * called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	/* NOTE(review): in_interrupt() is deprecated for driver use upstream;
	 * consider dropping this WARN_ON — confirm against current kernel docs.
	 */
	WARN_ON(in_interrupt());

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		/* reset failed: mark the failure and clear every reset-request
		 * and reset-progress bit so the service task does not retry
		 * forever on a dead device
		 */
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		clear_bit(__ICE_CORER_REQ, pf->state);
		clear_bit(__ICE_GLOBR_REQ, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		ice_reset_all_vfs(pf, true);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 *
 * Called from the service task; either finishes a hardware-initiated reset
 * that was signalled via OICR interrupt, or kicks off a newly requested one.
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 *
 * Logs a single netdev message when the link state actually changes
 * (vsi->current_isup caches the last reported state to suppress duplicates).
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* map AQ-reported link speed to a display string; each string carries
	 * a trailing space so the "%sbps" format below reads naturally
	 */
	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
		/* fall through */
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_SW_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");
	/* on AQ failure caps was zeroed by kzalloc, so fec_req falls through
	 * to "NONE" below rather than reading garbage
	 */

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		/* carrier state already matches: nothing to do */
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int result;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	result = ice_update_link_info(pi);
	if (result)
		dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
			pi->lport);

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return result;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		result = ice_aq_set_link_restart_an(pi, false, NULL);
		if (result) {
			dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
				vsi->vsi_num, result);
			return result;
		}
	}

	ice_dcb_rebuild(pf);
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	/* propagate the new link state to the VFs */
	ice_vc_notify_link_state(pf);

	return result;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	/* mask is inverted: a cleared bit means the event is enabled, so this
	 * enables exactly UPDOWN, MEDIA_NA and MODULE_QUAL_FAIL events
	 */
	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 *
 * Drains up to ICE_DFLT_IRQ_WORK receive-queue events from the given control
 * queue and dispatches them by opcode. Returns nonzero (true) when the work
 * budget was exhausted with events still pending, so the caller knows to keep
 * the pending bit set; returns 0 when the queue was drained or on early exit.
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		/* write-1-to-clear: clear the error bits we just reported */
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	/* nonzero means the budget ran out while events remained queued */
	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	/* nonzero return means the work budget ran out: keep the pending bit
	 * set so the service task is rescheduled to finish the drain
	 */
	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	/* re-check for messages that raced with clearing the pending bit,
	 * mirroring ice_clean_adminq_subtask
	 */
	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
static void ice_service_task_schedule(struct ice_pf *pf)
{
	/* test_and_set_bit makes this safe against concurrent schedulers:
	 * only the caller that flips SERVICE_SCHED queues the work
	 */
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 */
static void ice_service_task_stop(struct ice_pf *pf)
{
	set_bit(__ICE_SERVICE_DIS, pf->state);

	/* guard against stopping before the timer/work were ever set up */
	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(__ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	/* re-arm first so the service task runs periodically even if a run
	 * overlaps the timer expiry
	 */
	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task.
OICR interrupt handler indicates MDD event 1192b3969fd7SSudheer Mogilappagari */ 1193b3969fd7SSudheer Mogilappagari static void ice_handle_mdd_event(struct ice_pf *pf) 1194b3969fd7SSudheer Mogilappagari { 11954015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 1196b3969fd7SSudheer Mogilappagari struct ice_hw *hw = &pf->hw; 1197b3969fd7SSudheer Mogilappagari bool mdd_detected = false; 1198b3969fd7SSudheer Mogilappagari u32 reg; 11997c4bc1f5SAnirudh Venkataramanan int i; 1200b3969fd7SSudheer Mogilappagari 12018d7189d2SMd Fahad Iqbal Polash if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) 1202b3969fd7SSudheer Mogilappagari return; 1203b3969fd7SSudheer Mogilappagari 1204b3969fd7SSudheer Mogilappagari /* find what triggered the MDD event */ 1205b3969fd7SSudheer Mogilappagari reg = rd32(hw, GL_MDET_TX_PQM); 1206b3969fd7SSudheer Mogilappagari if (reg & GL_MDET_TX_PQM_VALID_M) { 1207b3969fd7SSudheer Mogilappagari u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> 1208b3969fd7SSudheer Mogilappagari GL_MDET_TX_PQM_PF_NUM_S; 1209b3969fd7SSudheer Mogilappagari u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> 1210b3969fd7SSudheer Mogilappagari GL_MDET_TX_PQM_VF_NUM_S; 1211b3969fd7SSudheer Mogilappagari u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> 1212b3969fd7SSudheer Mogilappagari GL_MDET_TX_PQM_MAL_TYPE_S; 1213b3969fd7SSudheer Mogilappagari u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> 1214b3969fd7SSudheer Mogilappagari GL_MDET_TX_PQM_QNUM_S); 1215b3969fd7SSudheer Mogilappagari 1216b3969fd7SSudheer Mogilappagari if (netif_msg_tx_err(pf)) 12174015d11eSBrett Creeley dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", 1218b3969fd7SSudheer Mogilappagari event, queue, pf_num, vf_num); 1219b3969fd7SSudheer Mogilappagari wr32(hw, GL_MDET_TX_PQM, 0xffffffff); 1220b3969fd7SSudheer Mogilappagari mdd_detected = true; 1221b3969fd7SSudheer Mogilappagari } 1222b3969fd7SSudheer Mogilappagari 1223b3969fd7SSudheer Mogilappagari reg = 
rd32(hw, GL_MDET_TX_TCLAN); 1224b3969fd7SSudheer Mogilappagari if (reg & GL_MDET_TX_TCLAN_VALID_M) { 1225b3969fd7SSudheer Mogilappagari u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> 1226b3969fd7SSudheer Mogilappagari GL_MDET_TX_TCLAN_PF_NUM_S; 1227b3969fd7SSudheer Mogilappagari u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> 1228b3969fd7SSudheer Mogilappagari GL_MDET_TX_TCLAN_VF_NUM_S; 1229b3969fd7SSudheer Mogilappagari u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> 1230b3969fd7SSudheer Mogilappagari GL_MDET_TX_TCLAN_MAL_TYPE_S; 1231b3969fd7SSudheer Mogilappagari u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> 1232b3969fd7SSudheer Mogilappagari GL_MDET_TX_TCLAN_QNUM_S); 1233b3969fd7SSudheer Mogilappagari 12341d8bd992SBen Shelton if (netif_msg_tx_err(pf)) 12354015d11eSBrett Creeley dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", 1236b3969fd7SSudheer Mogilappagari event, queue, pf_num, vf_num); 1237b3969fd7SSudheer Mogilappagari wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); 1238b3969fd7SSudheer Mogilappagari mdd_detected = true; 1239b3969fd7SSudheer Mogilappagari } 1240b3969fd7SSudheer Mogilappagari 1241b3969fd7SSudheer Mogilappagari reg = rd32(hw, GL_MDET_RX); 1242b3969fd7SSudheer Mogilappagari if (reg & GL_MDET_RX_VALID_M) { 1243b3969fd7SSudheer Mogilappagari u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> 1244b3969fd7SSudheer Mogilappagari GL_MDET_RX_PF_NUM_S; 1245b3969fd7SSudheer Mogilappagari u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> 1246b3969fd7SSudheer Mogilappagari GL_MDET_RX_VF_NUM_S; 1247b3969fd7SSudheer Mogilappagari u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> 1248b3969fd7SSudheer Mogilappagari GL_MDET_RX_MAL_TYPE_S; 1249b3969fd7SSudheer Mogilappagari u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> 1250b3969fd7SSudheer Mogilappagari GL_MDET_RX_QNUM_S); 1251b3969fd7SSudheer Mogilappagari 1252b3969fd7SSudheer Mogilappagari if (netif_msg_rx_err(pf)) 12534015d11eSBrett Creeley dev_info(dev, "Malicious Driver Detection event %d on 
RX queue %d PF# %d VF# %d\n", 1254b3969fd7SSudheer Mogilappagari event, queue, pf_num, vf_num); 1255b3969fd7SSudheer Mogilappagari wr32(hw, GL_MDET_RX, 0xffffffff); 1256b3969fd7SSudheer Mogilappagari mdd_detected = true; 1257b3969fd7SSudheer Mogilappagari } 1258b3969fd7SSudheer Mogilappagari 1259b3969fd7SSudheer Mogilappagari if (mdd_detected) { 1260b3969fd7SSudheer Mogilappagari bool pf_mdd_detected = false; 1261b3969fd7SSudheer Mogilappagari 1262b3969fd7SSudheer Mogilappagari reg = rd32(hw, PF_MDET_TX_PQM); 1263b3969fd7SSudheer Mogilappagari if (reg & PF_MDET_TX_PQM_VALID_M) { 1264b3969fd7SSudheer Mogilappagari wr32(hw, PF_MDET_TX_PQM, 0xFFFF); 12654015d11eSBrett Creeley dev_info(dev, "TX driver issue detected, PF reset issued\n"); 1266b3969fd7SSudheer Mogilappagari pf_mdd_detected = true; 1267b3969fd7SSudheer Mogilappagari } 1268b3969fd7SSudheer Mogilappagari 1269b3969fd7SSudheer Mogilappagari reg = rd32(hw, PF_MDET_TX_TCLAN); 1270b3969fd7SSudheer Mogilappagari if (reg & PF_MDET_TX_TCLAN_VALID_M) { 1271b3969fd7SSudheer Mogilappagari wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); 12724015d11eSBrett Creeley dev_info(dev, "TX driver issue detected, PF reset issued\n"); 1273b3969fd7SSudheer Mogilappagari pf_mdd_detected = true; 1274b3969fd7SSudheer Mogilappagari } 1275b3969fd7SSudheer Mogilappagari 1276b3969fd7SSudheer Mogilappagari reg = rd32(hw, PF_MDET_RX); 1277b3969fd7SSudheer Mogilappagari if (reg & PF_MDET_RX_VALID_M) { 1278b3969fd7SSudheer Mogilappagari wr32(hw, PF_MDET_RX, 0xFFFF); 12794015d11eSBrett Creeley dev_info(dev, "RX driver issue detected, PF reset issued\n"); 1280b3969fd7SSudheer Mogilappagari pf_mdd_detected = true; 1281b3969fd7SSudheer Mogilappagari } 1282b3969fd7SSudheer Mogilappagari /* Queue belongs to the PF initiate a reset */ 1283b3969fd7SSudheer Mogilappagari if (pf_mdd_detected) { 1284b3969fd7SSudheer Mogilappagari set_bit(__ICE_NEEDS_RESTART, pf->state); 1285b3969fd7SSudheer Mogilappagari ice_service_task_schedule(pf); 1286b3969fd7SSudheer 
Mogilappagari } 1287b3969fd7SSudheer Mogilappagari } 1288b3969fd7SSudheer Mogilappagari 128923c01122SMitch Williams /* check to see if one of the VFs caused the MDD */ 1290005881bcSBrett Creeley ice_for_each_vf(pf, i) { 12917c4bc1f5SAnirudh Venkataramanan struct ice_vf *vf = &pf->vf[i]; 12927c4bc1f5SAnirudh Venkataramanan 129323c01122SMitch Williams bool vf_mdd_detected = false; 1294a52db6b2SMichal Swiatkowski 12957c4bc1f5SAnirudh Venkataramanan reg = rd32(hw, VP_MDET_TX_PQM(i)); 12967c4bc1f5SAnirudh Venkataramanan if (reg & VP_MDET_TX_PQM_VALID_M) { 12977c4bc1f5SAnirudh Venkataramanan wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); 129823c01122SMitch Williams vf_mdd_detected = true; 12994015d11eSBrett Creeley dev_info(dev, "TX driver issue detected on VF %d\n", 13007c4bc1f5SAnirudh Venkataramanan i); 13017c4bc1f5SAnirudh Venkataramanan } 13027c4bc1f5SAnirudh Venkataramanan 13037c4bc1f5SAnirudh Venkataramanan reg = rd32(hw, VP_MDET_TX_TCLAN(i)); 13047c4bc1f5SAnirudh Venkataramanan if (reg & VP_MDET_TX_TCLAN_VALID_M) { 13057c4bc1f5SAnirudh Venkataramanan wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); 130623c01122SMitch Williams vf_mdd_detected = true; 13074015d11eSBrett Creeley dev_info(dev, "TX driver issue detected on VF %d\n", 13087c4bc1f5SAnirudh Venkataramanan i); 13097c4bc1f5SAnirudh Venkataramanan } 13107c4bc1f5SAnirudh Venkataramanan 13117c4bc1f5SAnirudh Venkataramanan reg = rd32(hw, VP_MDET_TX_TDPU(i)); 13127c4bc1f5SAnirudh Venkataramanan if (reg & VP_MDET_TX_TDPU_VALID_M) { 13137c4bc1f5SAnirudh Venkataramanan wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); 131423c01122SMitch Williams vf_mdd_detected = true; 13154015d11eSBrett Creeley dev_info(dev, "TX driver issue detected on VF %d\n", 13167c4bc1f5SAnirudh Venkataramanan i); 13177c4bc1f5SAnirudh Venkataramanan } 13187c4bc1f5SAnirudh Venkataramanan 13197c4bc1f5SAnirudh Venkataramanan reg = rd32(hw, VP_MDET_RX(i)); 13207c4bc1f5SAnirudh Venkataramanan if (reg & VP_MDET_RX_VALID_M) { 13217c4bc1f5SAnirudh Venkataramanan wr32(hw, 
VP_MDET_RX(i), 0xFFFF); 132223c01122SMitch Williams vf_mdd_detected = true; 13234015d11eSBrett Creeley dev_info(dev, "RX driver issue detected on VF %d\n", 13247c4bc1f5SAnirudh Venkataramanan i); 13257c4bc1f5SAnirudh Venkataramanan } 13267c4bc1f5SAnirudh Venkataramanan 132723c01122SMitch Williams if (vf_mdd_detected) { 1328a52db6b2SMichal Swiatkowski vf->num_mdd_events++; 1329e63a1dbdSAkeem G Abodunrin if (vf->num_mdd_events && 1330e63a1dbdSAkeem G Abodunrin vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD) 133119cce2c6SAnirudh Venkataramanan dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n", 133223c01122SMitch Williams i, vf->num_mdd_events); 13337c4bc1f5SAnirudh Venkataramanan } 13347c4bc1f5SAnirudh Venkataramanan } 1335b3969fd7SSudheer Mogilappagari } 1336b3969fd7SSudheer Mogilappagari 1337b3969fd7SSudheer Mogilappagari /** 13386d599946STony Nguyen * ice_force_phys_link_state - Force the physical link state 13396d599946STony Nguyen * @vsi: VSI to force the physical link state to up/down 13406d599946STony Nguyen * @link_up: true/false indicates to set the physical link to up/down 13416d599946STony Nguyen * 13426d599946STony Nguyen * Force the physical link state by getting the current PHY capabilities from 13436d599946STony Nguyen * hardware and setting the PHY config based on the determined capabilities. If 13446d599946STony Nguyen * link changes a link event will be triggered because both the Enable Automatic 13456d599946STony Nguyen * Link Update and LESM Enable bits are set when setting the PHY capabilities. 
13466d599946STony Nguyen * 13476d599946STony Nguyen * Returns 0 on success, negative on failure 13486d599946STony Nguyen */ 13496d599946STony Nguyen static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) 13506d599946STony Nguyen { 13516d599946STony Nguyen struct ice_aqc_get_phy_caps_data *pcaps; 13526d599946STony Nguyen struct ice_aqc_set_phy_cfg_data *cfg; 13536d599946STony Nguyen struct ice_port_info *pi; 13546d599946STony Nguyen struct device *dev; 13556d599946STony Nguyen int retcode; 13566d599946STony Nguyen 13576d599946STony Nguyen if (!vsi || !vsi->port_info || !vsi->back) 13586d599946STony Nguyen return -EINVAL; 13596d599946STony Nguyen if (vsi->type != ICE_VSI_PF) 13606d599946STony Nguyen return 0; 13616d599946STony Nguyen 13629a946843SAnirudh Venkataramanan dev = ice_pf_to_dev(vsi->back); 13636d599946STony Nguyen 13646d599946STony Nguyen pi = vsi->port_info; 13656d599946STony Nguyen 13669efe35d0STony Nguyen pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 13676d599946STony Nguyen if (!pcaps) 13686d599946STony Nguyen return -ENOMEM; 13696d599946STony Nguyen 13706d599946STony Nguyen retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 13716d599946STony Nguyen NULL); 13726d599946STony Nguyen if (retcode) { 137319cce2c6SAnirudh Venkataramanan dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", 13746d599946STony Nguyen vsi->vsi_num, retcode); 13756d599946STony Nguyen retcode = -EIO; 13766d599946STony Nguyen goto out; 13776d599946STony Nguyen } 13786d599946STony Nguyen 13796d599946STony Nguyen /* No change in link */ 13806d599946STony Nguyen if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 13816d599946STony Nguyen link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 13826d599946STony Nguyen goto out; 13836d599946STony Nguyen 13849efe35d0STony Nguyen cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 13856d599946STony Nguyen if (!cfg) { 13866d599946STony Nguyen retcode = -ENOMEM; 13876d599946STony Nguyen goto 
out; 13886d599946STony Nguyen } 13896d599946STony Nguyen 13906d599946STony Nguyen cfg->phy_type_low = pcaps->phy_type_low; 13916d599946STony Nguyen cfg->phy_type_high = pcaps->phy_type_high; 13926d599946STony Nguyen cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 13936d599946STony Nguyen cfg->low_power_ctrl = pcaps->low_power_ctrl; 13946d599946STony Nguyen cfg->eee_cap = pcaps->eee_cap; 13956d599946STony Nguyen cfg->eeer_value = pcaps->eeer_value; 13966d599946STony Nguyen cfg->link_fec_opt = pcaps->link_fec_options; 13976d599946STony Nguyen if (link_up) 13986d599946STony Nguyen cfg->caps |= ICE_AQ_PHY_ENA_LINK; 13996d599946STony Nguyen else 14006d599946STony Nguyen cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 14016d599946STony Nguyen 14026d599946STony Nguyen retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); 14036d599946STony Nguyen if (retcode) { 14046d599946STony Nguyen dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 14056d599946STony Nguyen vsi->vsi_num, retcode); 14066d599946STony Nguyen retcode = -EIO; 14076d599946STony Nguyen } 14086d599946STony Nguyen 14099efe35d0STony Nguyen kfree(cfg); 14106d599946STony Nguyen out: 14119efe35d0STony Nguyen kfree(pcaps); 14126d599946STony Nguyen return retcode; 14136d599946STony Nguyen } 14146d599946STony Nguyen 14156d599946STony Nguyen /** 14166d599946STony Nguyen * ice_check_media_subtask - Check for media; bring link up if detected. 
14176d599946STony Nguyen * @pf: pointer to PF struct 14186d599946STony Nguyen */ 14196d599946STony Nguyen static void ice_check_media_subtask(struct ice_pf *pf) 14206d599946STony Nguyen { 14216d599946STony Nguyen struct ice_port_info *pi; 14226d599946STony Nguyen struct ice_vsi *vsi; 14236d599946STony Nguyen int err; 14246d599946STony Nguyen 1425208ff751SAnirudh Venkataramanan vsi = ice_get_main_vsi(pf); 14266d599946STony Nguyen if (!vsi) 14276d599946STony Nguyen return; 14286d599946STony Nguyen 14296d599946STony Nguyen /* No need to check for media if it's already present or the interface 14306d599946STony Nguyen * is down 14316d599946STony Nguyen */ 14326d599946STony Nguyen if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) || 14336d599946STony Nguyen test_bit(__ICE_DOWN, vsi->state)) 14346d599946STony Nguyen return; 14356d599946STony Nguyen 14366d599946STony Nguyen /* Refresh link info and check if media is present */ 14376d599946STony Nguyen pi = vsi->port_info; 14386d599946STony Nguyen err = ice_update_link_info(pi); 14396d599946STony Nguyen if (err) 14406d599946STony Nguyen return; 14416d599946STony Nguyen 14426d599946STony Nguyen if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 14436d599946STony Nguyen err = ice_force_phys_link_state(vsi, true); 14446d599946STony Nguyen if (err) 14456d599946STony Nguyen return; 14466d599946STony Nguyen clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 14476d599946STony Nguyen 14486d599946STony Nguyen /* A Link Status Event will be generated; the event handler 14496d599946STony Nguyen * will complete bringing the interface up 14506d599946STony Nguyen */ 14516d599946STony Nguyen } 14526d599946STony Nguyen } 14536d599946STony Nguyen 14546d599946STony Nguyen /** 1455940b61afSAnirudh Venkataramanan * ice_service_task - manage and run subtasks 1456940b61afSAnirudh Venkataramanan * @work: pointer to work_struct contained by the PF struct 1457940b61afSAnirudh Venkataramanan */ 1458940b61afSAnirudh Venkataramanan static void 
ice_service_task(struct work_struct *work) 1459940b61afSAnirudh Venkataramanan { 1460940b61afSAnirudh Venkataramanan struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 1461940b61afSAnirudh Venkataramanan unsigned long start_time = jiffies; 1462940b61afSAnirudh Venkataramanan 1463940b61afSAnirudh Venkataramanan /* subtasks */ 14640b28b702SAnirudh Venkataramanan 14650b28b702SAnirudh Venkataramanan /* process reset requests first */ 14660b28b702SAnirudh Venkataramanan ice_reset_subtask(pf); 14670b28b702SAnirudh Venkataramanan 14680f9d5027SAnirudh Venkataramanan /* bail if a reset/recovery cycle is pending or rebuild failed */ 14695df7e45dSDave Ertman if (ice_is_reset_in_progress(pf->state) || 14700f9d5027SAnirudh Venkataramanan test_bit(__ICE_SUSPENDED, pf->state) || 14710f9d5027SAnirudh Venkataramanan test_bit(__ICE_NEEDS_RESTART, pf->state)) { 14720b28b702SAnirudh Venkataramanan ice_service_task_complete(pf); 14730b28b702SAnirudh Venkataramanan return; 14740b28b702SAnirudh Venkataramanan } 14750b28b702SAnirudh Venkataramanan 1476462acf6aSTony Nguyen ice_clean_adminq_subtask(pf); 14776d599946STony Nguyen ice_check_media_subtask(pf); 1478b3969fd7SSudheer Mogilappagari ice_check_for_hang_subtask(pf); 1479e94d4478SAnirudh Venkataramanan ice_sync_fltr_subtask(pf); 1480b3969fd7SSudheer Mogilappagari ice_handle_mdd_event(pf); 1481fcea6f3dSAnirudh Venkataramanan ice_watchdog_subtask(pf); 1482462acf6aSTony Nguyen 1483462acf6aSTony Nguyen if (ice_is_safe_mode(pf)) { 1484462acf6aSTony Nguyen ice_service_task_complete(pf); 1485462acf6aSTony Nguyen return; 1486462acf6aSTony Nguyen } 1487462acf6aSTony Nguyen 1488462acf6aSTony Nguyen ice_process_vflr_event(pf); 148975d2b253SAnirudh Venkataramanan ice_clean_mailboxq_subtask(pf); 1490940b61afSAnirudh Venkataramanan 1491940b61afSAnirudh Venkataramanan /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ 1492940b61afSAnirudh Venkataramanan ice_service_task_complete(pf); 1493940b61afSAnirudh Venkataramanan 
1494940b61afSAnirudh Venkataramanan /* If the tasks have taken longer than one service timer period 1495940b61afSAnirudh Venkataramanan * or there is more work to be done, reset the service timer to 1496940b61afSAnirudh Venkataramanan * schedule the service task now. 1497940b61afSAnirudh Venkataramanan */ 1498940b61afSAnirudh Venkataramanan if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 1499b3969fd7SSudheer Mogilappagari test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || 1500007676b4SAnirudh Venkataramanan test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || 150175d2b253SAnirudh Venkataramanan test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 1502940b61afSAnirudh Venkataramanan test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) 1503940b61afSAnirudh Venkataramanan mod_timer(&pf->serv_tmr, jiffies); 1504940b61afSAnirudh Venkataramanan } 1505940b61afSAnirudh Venkataramanan 1506837f08fdSAnirudh Venkataramanan /** 1507f31e4b6fSAnirudh Venkataramanan * ice_set_ctrlq_len - helper function to set controlq length 1508f9867df6SAnirudh Venkataramanan * @hw: pointer to the HW instance 1509f31e4b6fSAnirudh Venkataramanan */ 1510f31e4b6fSAnirudh Venkataramanan static void ice_set_ctrlq_len(struct ice_hw *hw) 1511f31e4b6fSAnirudh Venkataramanan { 1512f31e4b6fSAnirudh Venkataramanan hw->adminq.num_rq_entries = ICE_AQ_LEN; 1513f31e4b6fSAnirudh Venkataramanan hw->adminq.num_sq_entries = ICE_AQ_LEN; 1514f31e4b6fSAnirudh Venkataramanan hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 1515f31e4b6fSAnirudh Venkataramanan hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 151611836214SBrett Creeley hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN; 151711836214SBrett Creeley hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 151875d2b253SAnirudh Venkataramanan hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 151975d2b253SAnirudh Venkataramanan hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 1520f31e4b6fSAnirudh Venkataramanan } 1521f31e4b6fSAnirudh Venkataramanan 1522f31e4b6fSAnirudh Venkataramanan 
/** 152387324e74SHenry Tieman * ice_schedule_reset - schedule a reset 152487324e74SHenry Tieman * @pf: board private structure 152587324e74SHenry Tieman * @reset: reset being requested 152687324e74SHenry Tieman */ 152787324e74SHenry Tieman int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 152887324e74SHenry Tieman { 152987324e74SHenry Tieman struct device *dev = ice_pf_to_dev(pf); 153087324e74SHenry Tieman 153187324e74SHenry Tieman /* bail out if earlier reset has failed */ 153287324e74SHenry Tieman if (test_bit(__ICE_RESET_FAILED, pf->state)) { 153387324e74SHenry Tieman dev_dbg(dev, "earlier reset has failed\n"); 153487324e74SHenry Tieman return -EIO; 153587324e74SHenry Tieman } 153687324e74SHenry Tieman /* bail if reset/recovery already in progress */ 153787324e74SHenry Tieman if (ice_is_reset_in_progress(pf->state)) { 153887324e74SHenry Tieman dev_dbg(dev, "Reset already in progress\n"); 153987324e74SHenry Tieman return -EBUSY; 154087324e74SHenry Tieman } 154187324e74SHenry Tieman 154287324e74SHenry Tieman switch (reset) { 154387324e74SHenry Tieman case ICE_RESET_PFR: 154487324e74SHenry Tieman set_bit(__ICE_PFR_REQ, pf->state); 154587324e74SHenry Tieman break; 154687324e74SHenry Tieman case ICE_RESET_CORER: 154787324e74SHenry Tieman set_bit(__ICE_CORER_REQ, pf->state); 154887324e74SHenry Tieman break; 154987324e74SHenry Tieman case ICE_RESET_GLOBR: 155087324e74SHenry Tieman set_bit(__ICE_GLOBR_REQ, pf->state); 155187324e74SHenry Tieman break; 155287324e74SHenry Tieman default: 155387324e74SHenry Tieman return -EINVAL; 155487324e74SHenry Tieman } 155587324e74SHenry Tieman 155687324e74SHenry Tieman ice_service_task_schedule(pf); 155787324e74SHenry Tieman return 0; 155887324e74SHenry Tieman } 155987324e74SHenry Tieman 156087324e74SHenry Tieman /** 1561cdedef59SAnirudh Venkataramanan * ice_irq_affinity_notify - Callback for affinity changes 1562cdedef59SAnirudh Venkataramanan * @notify: context as to what irq was changed 1563cdedef59SAnirudh 
Venkataramanan * @mask: the new affinity mask 1564cdedef59SAnirudh Venkataramanan * 1565cdedef59SAnirudh Venkataramanan * This is a callback function used by the irq_set_affinity_notifier function 1566cdedef59SAnirudh Venkataramanan * so that we may register to receive changes to the irq affinity masks. 1567cdedef59SAnirudh Venkataramanan */ 1568c8b7abddSBruce Allan static void 1569c8b7abddSBruce Allan ice_irq_affinity_notify(struct irq_affinity_notify *notify, 1570cdedef59SAnirudh Venkataramanan const cpumask_t *mask) 1571cdedef59SAnirudh Venkataramanan { 1572cdedef59SAnirudh Venkataramanan struct ice_q_vector *q_vector = 1573cdedef59SAnirudh Venkataramanan container_of(notify, struct ice_q_vector, affinity_notify); 1574cdedef59SAnirudh Venkataramanan 1575cdedef59SAnirudh Venkataramanan cpumask_copy(&q_vector->affinity_mask, mask); 1576cdedef59SAnirudh Venkataramanan } 1577cdedef59SAnirudh Venkataramanan 1578cdedef59SAnirudh Venkataramanan /** 1579cdedef59SAnirudh Venkataramanan * ice_irq_affinity_release - Callback for affinity notifier release 1580cdedef59SAnirudh Venkataramanan * @ref: internal core kernel usage 1581cdedef59SAnirudh Venkataramanan * 1582cdedef59SAnirudh Venkataramanan * This is a callback function used by the irq_set_affinity_notifier function 1583cdedef59SAnirudh Venkataramanan * to inform the current notification subscriber that they will no longer 1584cdedef59SAnirudh Venkataramanan * receive notifications. 
1585cdedef59SAnirudh Venkataramanan */ 1586cdedef59SAnirudh Venkataramanan static void ice_irq_affinity_release(struct kref __always_unused *ref) {} 1587cdedef59SAnirudh Venkataramanan 1588cdedef59SAnirudh Venkataramanan /** 1589cdedef59SAnirudh Venkataramanan * ice_vsi_ena_irq - Enable IRQ for the given VSI 1590cdedef59SAnirudh Venkataramanan * @vsi: the VSI being configured 1591cdedef59SAnirudh Venkataramanan */ 1592cdedef59SAnirudh Venkataramanan static int ice_vsi_ena_irq(struct ice_vsi *vsi) 1593cdedef59SAnirudh Venkataramanan { 1594ba880734SBrett Creeley struct ice_hw *hw = &vsi->back->hw; 1595cdedef59SAnirudh Venkataramanan int i; 1596cdedef59SAnirudh Venkataramanan 15970c2561c8SBrett Creeley ice_for_each_q_vector(vsi, i) 1598cdedef59SAnirudh Venkataramanan ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 1599cdedef59SAnirudh Venkataramanan 1600cdedef59SAnirudh Venkataramanan ice_flush(hw); 1601cdedef59SAnirudh Venkataramanan return 0; 1602cdedef59SAnirudh Venkataramanan } 1603cdedef59SAnirudh Venkataramanan 1604cdedef59SAnirudh Venkataramanan /** 1605cdedef59SAnirudh Venkataramanan * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 1606cdedef59SAnirudh Venkataramanan * @vsi: the VSI being configured 1607cdedef59SAnirudh Venkataramanan * @basename: name for the vector 1608cdedef59SAnirudh Venkataramanan */ 1609cdedef59SAnirudh Venkataramanan static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 1610cdedef59SAnirudh Venkataramanan { 1611cdedef59SAnirudh Venkataramanan int q_vectors = vsi->num_q_vectors; 1612cdedef59SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 1613cbe66bfeSBrett Creeley int base = vsi->base_vector; 16144015d11eSBrett Creeley struct device *dev; 1615cdedef59SAnirudh Venkataramanan int rx_int_idx = 0; 1616cdedef59SAnirudh Venkataramanan int tx_int_idx = 0; 1617cdedef59SAnirudh Venkataramanan int vector, err; 1618cdedef59SAnirudh Venkataramanan int irq_num; 1619cdedef59SAnirudh Venkataramanan 
16204015d11eSBrett Creeley dev = ice_pf_to_dev(pf); 1621cdedef59SAnirudh Venkataramanan for (vector = 0; vector < q_vectors; vector++) { 1622cdedef59SAnirudh Venkataramanan struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 1623cdedef59SAnirudh Venkataramanan 1624cdedef59SAnirudh Venkataramanan irq_num = pf->msix_entries[base + vector].vector; 1625cdedef59SAnirudh Venkataramanan 1626cdedef59SAnirudh Venkataramanan if (q_vector->tx.ring && q_vector->rx.ring) { 1627cdedef59SAnirudh Venkataramanan snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1628cdedef59SAnirudh Venkataramanan "%s-%s-%d", basename, "TxRx", rx_int_idx++); 1629cdedef59SAnirudh Venkataramanan tx_int_idx++; 1630cdedef59SAnirudh Venkataramanan } else if (q_vector->rx.ring) { 1631cdedef59SAnirudh Venkataramanan snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1632cdedef59SAnirudh Venkataramanan "%s-%s-%d", basename, "rx", rx_int_idx++); 1633cdedef59SAnirudh Venkataramanan } else if (q_vector->tx.ring) { 1634cdedef59SAnirudh Venkataramanan snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1635cdedef59SAnirudh Venkataramanan "%s-%s-%d", basename, "tx", tx_int_idx++); 1636cdedef59SAnirudh Venkataramanan } else { 1637cdedef59SAnirudh Venkataramanan /* skip this unused q_vector */ 1638cdedef59SAnirudh Venkataramanan continue; 1639cdedef59SAnirudh Venkataramanan } 16404015d11eSBrett Creeley err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0, 16418d051b8bSAlan Brady q_vector->name, q_vector); 1642cdedef59SAnirudh Venkataramanan if (err) { 164319cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", 164419cce2c6SAnirudh Venkataramanan err); 1645cdedef59SAnirudh Venkataramanan goto free_q_irqs; 1646cdedef59SAnirudh Venkataramanan } 1647cdedef59SAnirudh Venkataramanan 1648cdedef59SAnirudh Venkataramanan /* register for affinity change notifications */ 1649cdedef59SAnirudh Venkataramanan q_vector->affinity_notify.notify = ice_irq_affinity_notify; 
/**
 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
 * @vsi: VSI to setup Tx rings used by XDP
 *
 * Allocates one ice_ring per XDP Tx queue and initializes it for XDP use:
 * no netdev is attached (XDP Tx rings never carry stack traffic) and the
 * ring is flagged via ice_set_ring_xdp() before the umem lookup so that
 * ice_xsk_umem() sees it as an XDP ring.
 *
 * Return 0 on success and negative value on error
 */
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	int i;

	for (i = 0; i < vsi->num_xdp_txq; i++) {
		/* XDP Tx queues are mapped after the regular Tx queues
		 * inside vsi->txq_map, hence the alloc_txq offset
		 */
		u16 xdp_q_idx = vsi->alloc_txq + i;
		struct ice_ring *xdp_ring;

		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);

		if (!xdp_ring)
			goto free_xdp_rings;

		xdp_ring->q_index = xdp_q_idx;
		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
		xdp_ring->ring_active = false;
		xdp_ring->vsi = vsi;
		xdp_ring->netdev = NULL;	/* XDP rings have no netdev */
		xdp_ring->dev = dev;
		xdp_ring->count = vsi->num_tx_desc;
		vsi->xdp_rings[i] = xdp_ring;
		if (ice_setup_tx_ring(xdp_ring))
			goto free_xdp_rings;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
	}

	return 0;

free_xdp_rings:
	/* only descriptor memory is released here; the ring structs
	 * themselves are freed by the caller (kfree_rcu in
	 * ice_prepare_xdp_rings error path)
	 */
	for (; i >= 0; i--)
		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
			ice_free_tx_ring(vsi->xdp_rings[i]);
	return -ENOMEM;
}
/**
 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
 * @vsi: VSI to set the bpf prog on
 * @prog: the bpf prog pointer
 *
 * Atomically swaps the VSI-level program pointer (xchg), releases the
 * reference on the previous program if there was one, then publishes the
 * new pointer to every Rx ring with WRITE_ONCE so the hot path reads a
 * consistent value.
 */
static void
ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;
	int i;

	old_prog = xchg(&vsi->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	ice_for_each_rxq(vsi, i)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
}

/**
 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
 * @vsi: VSI to bring up Tx rings used by XDP
 * @prog: bpf program that will be assigned to VSI
 *
 * Reserves PF Tx queues for XDP, allocates and sets up the rings, spreads
 * them across the VSI's interrupt vectors and (outside of the reset path)
 * tells the Tx scheduler about the extra queues before installing @prog.
 *
 * Return 0 on success and negative value on error
 */
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	int xdp_rings_rem = vsi->num_xdp_txq;
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg xdp_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->num_xdp_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = vsi->alloc_txq,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	enum ice_status status;
	struct device *dev;
	int i, v_idx;

	dev = ice_pf_to_dev(pf);
	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
	if (!vsi->xdp_rings)
		return -ENOMEM;

	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
	if (__ice_vsi_get_qs(&xdp_qs_cfg))
		goto err_map_xdp;

	if (ice_xdp_alloc_setup_rings(vsi))
		goto clear_xdp_rings;

	/* follow the logic from ice_vsi_map_rings_to_vectors: distribute
	 * the XDP rings as evenly as possible over the remaining vectors
	 */
	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		int xdp_rings_per_v, q_id, q_base;

		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
					       vsi->num_q_vectors - v_idx);
		q_base = vsi->num_xdp_txq - xdp_rings_rem;

		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];

			/* push onto the vector's Tx ring list */
			xdp_ring->q_vector = q_vector;
			xdp_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = xdp_ring;
		}
		xdp_rings_rem -= xdp_rings_per_v;
	}

	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
	 * ice_cfg_vsi_lan is being called
	 */
	if (ice_is_reset_in_progress(pf->state))
		return 0;

	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
			status);
		goto clear_xdp_rings;
	}
	ice_vsi_assign_bpf_prog(vsi, prog);

	return 0;
clear_xdp_rings:
	for (i = 0; i < vsi->num_xdp_txq; i++)
		if (vsi->xdp_rings[i]) {
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

err_map_xdp:
	/* NOTE(review): when __ice_vsi_get_qs() failed, some txq_map slots
	 * past alloc_txq may never have been populated — confirm clear_bit
	 * indices are always valid on this path.
	 */
	mutex_lock(&pf->avail_q_mutex);
	for (i = 0; i < vsi->num_xdp_txq; i++) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	devm_kfree(dev, vsi->xdp_rings);
	return -ENOMEM;
}
/**
 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
 * @vsi: VSI to remove XDP rings
 *
 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
 * resources
 */
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	int i, v_idx;

	/* q_vectors are freed in reset path so there's no point in detaching
	 * rings; in case of rebuild being triggered not from reset, reset
	 * bits in pf->state won't be set, so additionally check first
	 * q_vector against NULL
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		goto free_qmap;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		struct ice_ring *ring;

		/* XDP rings were pushed onto the head of the vector's Tx
		 * list; walk until the first non-XDP ring (or list end)
		 */
		ice_for_each_ring(ring, q_vector->tx)
			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
				break;

		/* restore the value of last node prior to XDP setup */
		q_vector->tx.ring = ring;
	}

free_qmap:
	/* return the reserved queue IDs to the PF-wide pool */
	mutex_lock(&pf->avail_q_mutex);
	for (i = 0; i < vsi->num_xdp_txq; i++) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->num_xdp_txq; i++)
		if (vsi->xdp_rings[i]) {
			if (vsi->xdp_rings[i]->desc)
				ice_free_tx_ring(vsi->xdp_rings[i]);
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
	vsi->xdp_rings = NULL;

	/* in reset/rebuild the scheduler and prog state are handled by the
	 * rebuild flow, so stop here
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		return 0;

	ice_vsi_assign_bpf_prog(vsi, NULL);

	/* notify Tx scheduler that we destroyed XDP queues and bring
	 * back the old number of child nodes
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq;

	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			       max_txqs);
}
/**
 * ice_xdp_setup_prog - Add or remove XDP eBPF program
 * @vsi: VSI to setup XDP for
 * @prog: XDP program
 * @extack: netlink extended ack
 *
 * Installing a program on a VSI that has none builds the XDP Tx rings;
 * removing the last program tears them down; replacing an existing program
 * only swaps the pointer. The netdev is brought down around ring changes
 * when it was running.
 */
static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack)
{
	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
	bool if_running = netif_running(vsi->netdev);
	int ret = 0, xdp_ring_err = 0;

	/* XDP frames must fit in a single Rx buffer */
	if (frame_size > vsi->rx_buf_len) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
		return -EOPNOTSUPP;
	}

	/* need to stop netdev while setting up the program for Rx rings */
	if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
		ret = ice_down(vsi);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Preparing device for XDP attach failed");
			return ret;
		}
	}

	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
		/* first program: one XDP Tx queue per regular Tx queue */
		vsi->num_xdp_txq = vsi->alloc_txq;
		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack,
					   "Setting up XDP Tx resources failed");
	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
		xdp_ring_err = ice_destroy_xdp_rings(vsi);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack,
					   "Freeing XDP Tx resources failed");
	} else {
		/* program replace: no ring changes needed */
		ice_vsi_assign_bpf_prog(vsi, prog);
	}

	if (if_running)
		ret = ice_up(vsi);

	/* kick NAPI on zero-copy queues so pending AF_XDP work resumes */
	if (!ret && prog && vsi->xsk_umems) {
		int i;

		ice_for_each_rxq(vsi, i) {
			struct ice_ring *rx_ring = vsi->rx_rings[i];

			if (rx_ring->xsk_umem)
				napi_schedule(&rx_ring->q_vector->napi);
		}
	}

	/* NOTE(review): any failure (including from ice_up/ice_down paths)
	 * is collapsed to -ENOMEM here, masking the real error code —
	 * consider propagating ret/xdp_ring_err; confirm callers don't
	 * depend on -ENOMEM specifically.
	 */
	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}
-ENOMEM : 0; 1958efc2214bSMaciej Fijalkowski } 1959efc2214bSMaciej Fijalkowski 1960efc2214bSMaciej Fijalkowski /** 1961efc2214bSMaciej Fijalkowski * ice_xdp - implements XDP handler 1962efc2214bSMaciej Fijalkowski * @dev: netdevice 1963efc2214bSMaciej Fijalkowski * @xdp: XDP command 1964efc2214bSMaciej Fijalkowski */ 1965efc2214bSMaciej Fijalkowski static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1966efc2214bSMaciej Fijalkowski { 1967efc2214bSMaciej Fijalkowski struct ice_netdev_priv *np = netdev_priv(dev); 1968efc2214bSMaciej Fijalkowski struct ice_vsi *vsi = np->vsi; 1969efc2214bSMaciej Fijalkowski 1970efc2214bSMaciej Fijalkowski if (vsi->type != ICE_VSI_PF) { 1971efc2214bSMaciej Fijalkowski NL_SET_ERR_MSG_MOD(xdp->extack, 1972efc2214bSMaciej Fijalkowski "XDP can be loaded only on PF VSI"); 1973efc2214bSMaciej Fijalkowski return -EINVAL; 1974efc2214bSMaciej Fijalkowski } 1975efc2214bSMaciej Fijalkowski 1976efc2214bSMaciej Fijalkowski switch (xdp->command) { 1977efc2214bSMaciej Fijalkowski case XDP_SETUP_PROG: 1978efc2214bSMaciej Fijalkowski return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); 1979efc2214bSMaciej Fijalkowski case XDP_QUERY_PROG: 1980efc2214bSMaciej Fijalkowski xdp->prog_id = vsi->xdp_prog ? 
/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Re-arms the "other interrupt cause" (OICR) sources handled by the misc
 * vector: ECC/HMC errors, malicious-driver detection, resets, PCI
 * exceptions and VF-level resets.
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * Handles the "other interrupt cause" (OICR) vector: decodes the cause
 * register, latches per-cause pending bits in pf->state for the service
 * task, and requests a PF reset on critical errors.
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	struct device *dev;
	u32 oicr, ena_mask;

	dev = ice_pf_to_dev(pf);
	/* always let the service task drain the admin and mailbox queues */
	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	/* each handled cause is removed from ena_mask; whatever remains set
	 * in (oicr & ena_mask) at the end is unexpected
	 */
	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		ena_mask &= ~PFINT_OICR_VFLR_M;
		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(dev, "Invalid reset type %d\n", reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PE_CRITERR_M |
			    PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
	}
	ret = IRQ_HANDLED;

	if (!test_bit(__ICE_DOWN, pf->state)) {
		ice_service_task_schedule(pf);
		/* re-enable the interrupt for the next event */
		ice_irq_dynamic_ena(hw, NULL, NULL);
	}

	return ret;
}
2112940b61afSAnirudh Venkataramanan */ 2113940b61afSAnirudh Venkataramanan if (oicr & (PFINT_OICR_PE_CRITERR_M | 2114940b61afSAnirudh Venkataramanan PFINT_OICR_PCI_EXCEPTION_M | 21150b28b702SAnirudh Venkataramanan PFINT_OICR_ECC_ERR_M)) { 2116940b61afSAnirudh Venkataramanan set_bit(__ICE_PFR_REQ, pf->state); 21170b28b702SAnirudh Venkataramanan ice_service_task_schedule(pf); 21180b28b702SAnirudh Venkataramanan } 2119940b61afSAnirudh Venkataramanan } 2120940b61afSAnirudh Venkataramanan ret = IRQ_HANDLED; 2121940b61afSAnirudh Venkataramanan 2122940b61afSAnirudh Venkataramanan if (!test_bit(__ICE_DOWN, pf->state)) { 2123940b61afSAnirudh Venkataramanan ice_service_task_schedule(pf); 2124cdedef59SAnirudh Venkataramanan ice_irq_dynamic_ena(hw, NULL, NULL); 2125940b61afSAnirudh Venkataramanan } 2126940b61afSAnirudh Venkataramanan 2127940b61afSAnirudh Venkataramanan return ret; 2128940b61afSAnirudh Venkataramanan } 2129940b61afSAnirudh Venkataramanan 2130940b61afSAnirudh Venkataramanan /** 21310e04e8e1SBrett Creeley * ice_dis_ctrlq_interrupts - disable control queue interrupts 21320e04e8e1SBrett Creeley * @hw: pointer to HW structure 21330e04e8e1SBrett Creeley */ 21340e04e8e1SBrett Creeley static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 21350e04e8e1SBrett Creeley { 21360e04e8e1SBrett Creeley /* disable Admin queue Interrupt causes */ 21370e04e8e1SBrett Creeley wr32(hw, PFINT_FW_CTL, 21380e04e8e1SBrett Creeley rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 21390e04e8e1SBrett Creeley 21400e04e8e1SBrett Creeley /* disable Mailbox queue Interrupt causes */ 21410e04e8e1SBrett Creeley wr32(hw, PFINT_MBX_CTL, 21420e04e8e1SBrett Creeley rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 21430e04e8e1SBrett Creeley 21440e04e8e1SBrett Creeley /* disable Control queue Interrupt causes */ 21450e04e8e1SBrett Creeley wr32(hw, PFINT_OICR_CTL, 21460e04e8e1SBrett Creeley rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 21470e04e8e1SBrett Creeley 21480e04e8e1SBrett 
/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 *
 * Disables the control queue and OICR interrupt sources first, then waits
 * for any in-flight handler before freeing the IRQ and returning the
 * vector to the tracker.
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	ice_dis_ctrlq_interrupts(hw);

	/* disable OICR interrupt */
	wr32(hw, PFINT_OICR_ENA, 0);
	ice_flush(hw);

	if (pf->msix_entries) {
		/* make sure ice_misc_intr is not running before freeing */
		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
		devm_free_irq(ice_pf_to_dev(pf),
			      pf->msix_entries[pf->oicr_idx].vector, pf);
	}

	/* give the reserved vector back to the software pool */
	pf->num_avail_sw_msix += 1;
	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}
21860e04e8e1SBrett Creeley wr32(hw, PFINT_OICR_CTL, val); 21870e04e8e1SBrett Creeley 21880e04e8e1SBrett Creeley /* enable Admin queue Interrupt causes */ 2189b07833a0SBrett Creeley val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 21900e04e8e1SBrett Creeley PFINT_FW_CTL_CAUSE_ENA_M); 21910e04e8e1SBrett Creeley wr32(hw, PFINT_FW_CTL, val); 21920e04e8e1SBrett Creeley 21930e04e8e1SBrett Creeley /* enable Mailbox queue Interrupt causes */ 2194b07833a0SBrett Creeley val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 21950e04e8e1SBrett Creeley PFINT_MBX_CTL_CAUSE_ENA_M); 21960e04e8e1SBrett Creeley wr32(hw, PFINT_MBX_CTL, val); 21970e04e8e1SBrett Creeley 21980e04e8e1SBrett Creeley ice_flush(hw); 21990e04e8e1SBrett Creeley } 22000e04e8e1SBrett Creeley 22010e04e8e1SBrett Creeley /** 2202940b61afSAnirudh Venkataramanan * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 2203940b61afSAnirudh Venkataramanan * @pf: board private structure 2204940b61afSAnirudh Venkataramanan * 2205940b61afSAnirudh Venkataramanan * This sets up the handler for MSIX 0, which is used to manage the 2206940b61afSAnirudh Venkataramanan * non-queue interrupts, e.g. AdminQ and errors. This is not used 2207940b61afSAnirudh Venkataramanan * when in MSI or Legacy interrupt mode. 
/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 */
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int oicr_idx, err = 0;

	/* build the interrupt name once; it persists across resets */
	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
			 dev_driver_string(dev), dev_name(dev));

	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
	if (ice_is_reset_in_progress(pf->state))
		goto skip_req_irq;

	/* reserve one vector in irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0)
		return oicr_idx;

	pf->num_avail_sw_msix -= 1;
	pf->oicr_idx = oicr_idx;

	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
			       ice_misc_intr, 0, pf->int_name, pf);
	if (err) {
		dev_err(dev, "devm_request_irq for %s failed: %d\n",
			pf->int_name, err);
		/* undo the reservation made above */
		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		return err;
	}

skip_req_irq:
	ice_ena_misc_vector(pf);

	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
	/* set the initial interrupt throttle rate for the misc vector */
	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);

	ice_flush(hw);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return 0;
}
2264df0f8479SAnirudh Venkataramanan */ 2265df0f8479SAnirudh Venkataramanan static void ice_napi_add(struct ice_vsi *vsi) 2266df0f8479SAnirudh Venkataramanan { 2267df0f8479SAnirudh Venkataramanan int v_idx; 2268df0f8479SAnirudh Venkataramanan 2269df0f8479SAnirudh Venkataramanan if (!vsi->netdev) 2270df0f8479SAnirudh Venkataramanan return; 2271df0f8479SAnirudh Venkataramanan 22720c2561c8SBrett Creeley ice_for_each_q_vector(vsi, v_idx) 2273df0f8479SAnirudh Venkataramanan netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, 2274df0f8479SAnirudh Venkataramanan ice_napi_poll, NAPI_POLL_WEIGHT); 2275df0f8479SAnirudh Venkataramanan } 2276df0f8479SAnirudh Venkataramanan 2277df0f8479SAnirudh Venkataramanan /** 2278462acf6aSTony Nguyen * ice_set_ops - set netdev and ethtools ops for the given netdev 2279462acf6aSTony Nguyen * @netdev: netdev instance 22803a858ba3SAnirudh Venkataramanan */ 2281462acf6aSTony Nguyen static void ice_set_ops(struct net_device *netdev) 22823a858ba3SAnirudh Venkataramanan { 2283462acf6aSTony Nguyen struct ice_pf *pf = ice_netdev_to_pf(netdev); 2284462acf6aSTony Nguyen 2285462acf6aSTony Nguyen if (ice_is_safe_mode(pf)) { 2286462acf6aSTony Nguyen netdev->netdev_ops = &ice_netdev_safe_mode_ops; 2287462acf6aSTony Nguyen ice_set_ethtool_safe_mode_ops(netdev); 2288462acf6aSTony Nguyen return; 2289462acf6aSTony Nguyen } 2290462acf6aSTony Nguyen 2291462acf6aSTony Nguyen netdev->netdev_ops = &ice_netdev_ops; 2292462acf6aSTony Nguyen ice_set_ethtool_ops(netdev); 2293462acf6aSTony Nguyen } 2294462acf6aSTony Nguyen 2295462acf6aSTony Nguyen /** 2296462acf6aSTony Nguyen * ice_set_netdev_features - set features for the given netdev 2297462acf6aSTony Nguyen * @netdev: netdev instance 2298462acf6aSTony Nguyen */ 2299462acf6aSTony Nguyen static void ice_set_netdev_features(struct net_device *netdev) 2300462acf6aSTony Nguyen { 2301462acf6aSTony Nguyen struct ice_pf *pf = ice_netdev_to_pf(netdev); 2302d76a60baSAnirudh Venkataramanan netdev_features_t 
csumo_features; 2303d76a60baSAnirudh Venkataramanan netdev_features_t vlano_features; 2304d76a60baSAnirudh Venkataramanan netdev_features_t dflt_features; 2305d76a60baSAnirudh Venkataramanan netdev_features_t tso_features; 23063a858ba3SAnirudh Venkataramanan 2307462acf6aSTony Nguyen if (ice_is_safe_mode(pf)) { 2308462acf6aSTony Nguyen /* safe mode */ 2309462acf6aSTony Nguyen netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 2310462acf6aSTony Nguyen netdev->hw_features = netdev->features; 2311462acf6aSTony Nguyen return; 2312462acf6aSTony Nguyen } 23133a858ba3SAnirudh Venkataramanan 2314d76a60baSAnirudh Venkataramanan dflt_features = NETIF_F_SG | 23153a858ba3SAnirudh Venkataramanan NETIF_F_HIGHDMA | 23163a858ba3SAnirudh Venkataramanan NETIF_F_RXHASH; 23173a858ba3SAnirudh Venkataramanan 2318d76a60baSAnirudh Venkataramanan csumo_features = NETIF_F_RXCSUM | 2319d76a60baSAnirudh Venkataramanan NETIF_F_IP_CSUM | 2320cf909e19SAnirudh Venkataramanan NETIF_F_SCTP_CRC | 2321d76a60baSAnirudh Venkataramanan NETIF_F_IPV6_CSUM; 2322d76a60baSAnirudh Venkataramanan 2323d76a60baSAnirudh Venkataramanan vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 2324d76a60baSAnirudh Venkataramanan NETIF_F_HW_VLAN_CTAG_TX | 2325d76a60baSAnirudh Venkataramanan NETIF_F_HW_VLAN_CTAG_RX; 2326d76a60baSAnirudh Venkataramanan 2327a54e3b8cSBrett Creeley tso_features = NETIF_F_TSO | 2328a54e3b8cSBrett Creeley NETIF_F_GSO_UDP_L4; 2329d76a60baSAnirudh Venkataramanan 2330d76a60baSAnirudh Venkataramanan /* set features that user can change */ 2331d76a60baSAnirudh Venkataramanan netdev->hw_features = dflt_features | csumo_features | 2332d76a60baSAnirudh Venkataramanan vlano_features | tso_features; 2333d76a60baSAnirudh Venkataramanan 23343a858ba3SAnirudh Venkataramanan /* enable features */ 23353a858ba3SAnirudh Venkataramanan netdev->features |= netdev->hw_features; 2336d76a60baSAnirudh Venkataramanan /* encap and VLAN devices inherit default, csumo and tso features */ 2337d76a60baSAnirudh Venkataramanan 
netdev->hw_enc_features |= dflt_features | csumo_features | 2338d76a60baSAnirudh Venkataramanan tso_features; 2339d76a60baSAnirudh Venkataramanan netdev->vlan_features |= dflt_features | csumo_features | 2340d76a60baSAnirudh Venkataramanan tso_features; 2341462acf6aSTony Nguyen } 2342462acf6aSTony Nguyen 2343462acf6aSTony Nguyen /** 2344462acf6aSTony Nguyen * ice_cfg_netdev - Allocate, configure and register a netdev 2345462acf6aSTony Nguyen * @vsi: the VSI associated with the new netdev 2346462acf6aSTony Nguyen * 2347462acf6aSTony Nguyen * Returns 0 on success, negative value on failure 2348462acf6aSTony Nguyen */ 2349462acf6aSTony Nguyen static int ice_cfg_netdev(struct ice_vsi *vsi) 2350462acf6aSTony Nguyen { 2351462acf6aSTony Nguyen struct ice_pf *pf = vsi->back; 2352462acf6aSTony Nguyen struct ice_netdev_priv *np; 2353462acf6aSTony Nguyen struct net_device *netdev; 2354462acf6aSTony Nguyen u8 mac_addr[ETH_ALEN]; 2355462acf6aSTony Nguyen int err; 2356462acf6aSTony Nguyen 2357462acf6aSTony Nguyen netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 2358462acf6aSTony Nguyen vsi->alloc_rxq); 2359462acf6aSTony Nguyen if (!netdev) 2360462acf6aSTony Nguyen return -ENOMEM; 2361462acf6aSTony Nguyen 2362462acf6aSTony Nguyen vsi->netdev = netdev; 2363462acf6aSTony Nguyen np = netdev_priv(netdev); 2364462acf6aSTony Nguyen np->vsi = vsi; 2365462acf6aSTony Nguyen 2366462acf6aSTony Nguyen ice_set_netdev_features(netdev); 2367462acf6aSTony Nguyen 2368462acf6aSTony Nguyen ice_set_ops(netdev); 23693a858ba3SAnirudh Venkataramanan 23703a858ba3SAnirudh Venkataramanan if (vsi->type == ICE_VSI_PF) { 23714015d11eSBrett Creeley SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf)); 23723a858ba3SAnirudh Venkataramanan ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 23733a858ba3SAnirudh Venkataramanan ether_addr_copy(netdev->dev_addr, mac_addr); 23743a858ba3SAnirudh Venkataramanan ether_addr_copy(netdev->perm_addr, mac_addr); 23753a858ba3SAnirudh Venkataramanan } 23763a858ba3SAnirudh 
Venkataramanan 23773a858ba3SAnirudh Venkataramanan netdev->priv_flags |= IFF_UNICAST_FLT; 23783a858ba3SAnirudh Venkataramanan 2379462acf6aSTony Nguyen /* Setup netdev TC information */ 2380462acf6aSTony Nguyen ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 2381cdedef59SAnirudh Venkataramanan 23823a858ba3SAnirudh Venkataramanan /* setup watchdog timeout value to be 5 second */ 23833a858ba3SAnirudh Venkataramanan netdev->watchdog_timeo = 5 * HZ; 23843a858ba3SAnirudh Venkataramanan 23853a858ba3SAnirudh Venkataramanan netdev->min_mtu = ETH_MIN_MTU; 23863a858ba3SAnirudh Venkataramanan netdev->max_mtu = ICE_MAX_MTU; 23873a858ba3SAnirudh Venkataramanan 2388df0f8479SAnirudh Venkataramanan err = register_netdev(vsi->netdev); 23893a858ba3SAnirudh Venkataramanan if (err) 23903a858ba3SAnirudh Venkataramanan return err; 23913a858ba3SAnirudh Venkataramanan 2392df0f8479SAnirudh Venkataramanan netif_carrier_off(vsi->netdev); 23933a858ba3SAnirudh Venkataramanan 2394df0f8479SAnirudh Venkataramanan /* make sure transmit queues start off as stopped */ 2395df0f8479SAnirudh Venkataramanan netif_tx_stop_all_queues(vsi->netdev); 23963a858ba3SAnirudh Venkataramanan 23973a858ba3SAnirudh Venkataramanan return 0; 23983a858ba3SAnirudh Venkataramanan } 23993a858ba3SAnirudh Venkataramanan 24003a858ba3SAnirudh Venkataramanan /** 2401d76a60baSAnirudh Venkataramanan * ice_fill_rss_lut - Fill the RSS lookup table with default values 2402d76a60baSAnirudh Venkataramanan * @lut: Lookup table 2403d76a60baSAnirudh Venkataramanan * @rss_table_size: Lookup table size 2404d76a60baSAnirudh Venkataramanan * @rss_size: Range of queue number for hashing 2405d76a60baSAnirudh Venkataramanan */ 2406d76a60baSAnirudh Venkataramanan void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 2407d76a60baSAnirudh Venkataramanan { 2408d76a60baSAnirudh Venkataramanan u16 i; 2409d76a60baSAnirudh Venkataramanan 2410d76a60baSAnirudh Venkataramanan for (i = 0; i < rss_table_size; i++) 2411d76a60baSAnirudh 
Venkataramanan lut[i] = i % rss_size; 2412d76a60baSAnirudh Venkataramanan } 2413d76a60baSAnirudh Venkataramanan 2414d76a60baSAnirudh Venkataramanan /** 24150f9d5027SAnirudh Venkataramanan * ice_pf_vsi_setup - Set up a PF VSI 24160f9d5027SAnirudh Venkataramanan * @pf: board private structure 24170f9d5027SAnirudh Venkataramanan * @pi: pointer to the port_info instance 24180f9d5027SAnirudh Venkataramanan * 24190e674aebSAnirudh Venkataramanan * Returns pointer to the successfully allocated VSI software struct 24200e674aebSAnirudh Venkataramanan * on success, otherwise returns NULL on failure. 24210f9d5027SAnirudh Venkataramanan */ 24220f9d5027SAnirudh Venkataramanan static struct ice_vsi * 24230f9d5027SAnirudh Venkataramanan ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 24240f9d5027SAnirudh Venkataramanan { 24250f9d5027SAnirudh Venkataramanan return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); 24260f9d5027SAnirudh Venkataramanan } 24270f9d5027SAnirudh Venkataramanan 24280f9d5027SAnirudh Venkataramanan /** 24290e674aebSAnirudh Venkataramanan * ice_lb_vsi_setup - Set up a loopback VSI 24300e674aebSAnirudh Venkataramanan * @pf: board private structure 24310e674aebSAnirudh Venkataramanan * @pi: pointer to the port_info instance 24320e674aebSAnirudh Venkataramanan * 24330e674aebSAnirudh Venkataramanan * Returns pointer to the successfully allocated VSI software struct 24340e674aebSAnirudh Venkataramanan * on success, otherwise returns NULL on failure. 
24350e674aebSAnirudh Venkataramanan */ 24360e674aebSAnirudh Venkataramanan struct ice_vsi * 24370e674aebSAnirudh Venkataramanan ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 24380e674aebSAnirudh Venkataramanan { 24390e674aebSAnirudh Venkataramanan return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID); 24400e674aebSAnirudh Venkataramanan } 24410e674aebSAnirudh Venkataramanan 24420e674aebSAnirudh Venkataramanan /** 2443f9867df6SAnirudh Venkataramanan * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 2444d76a60baSAnirudh Venkataramanan * @netdev: network interface to be adjusted 2445d76a60baSAnirudh Venkataramanan * @proto: unused protocol 2446f9867df6SAnirudh Venkataramanan * @vid: VLAN ID to be added 2447d76a60baSAnirudh Venkataramanan * 2448f9867df6SAnirudh Venkataramanan * net_device_ops implementation for adding VLAN IDs 2449d76a60baSAnirudh Venkataramanan */ 2450c8b7abddSBruce Allan static int 2451c8b7abddSBruce Allan ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, 2452c8b7abddSBruce Allan u16 vid) 2453d76a60baSAnirudh Venkataramanan { 2454d76a60baSAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 2455d76a60baSAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 24565eda8afdSAkeem G Abodunrin int ret; 2457d76a60baSAnirudh Venkataramanan 2458d76a60baSAnirudh Venkataramanan if (vid >= VLAN_N_VID) { 2459d76a60baSAnirudh Venkataramanan netdev_err(netdev, "VLAN id requested %d is out of range %d\n", 2460d76a60baSAnirudh Venkataramanan vid, VLAN_N_VID); 2461d76a60baSAnirudh Venkataramanan return -EINVAL; 2462d76a60baSAnirudh Venkataramanan } 2463d76a60baSAnirudh Venkataramanan 2464d76a60baSAnirudh Venkataramanan if (vsi->info.pvid) 2465d76a60baSAnirudh Venkataramanan return -EINVAL; 2466d76a60baSAnirudh Venkataramanan 246742f3efefSBrett Creeley /* VLAN 0 is added by default during load/reset */ 246842f3efefSBrett Creeley if (!vid) 246942f3efefSBrett Creeley return 0; 247042f3efefSBrett 
Creeley 247142f3efefSBrett Creeley /* Enable VLAN pruning when a VLAN other than 0 is added */ 247242f3efefSBrett Creeley if (!ice_vsi_is_vlan_pruning_ena(vsi)) { 24735eda8afdSAkeem G Abodunrin ret = ice_cfg_vlan_pruning(vsi, true, false); 24744f74dcc1SBrett Creeley if (ret) 24754f74dcc1SBrett Creeley return ret; 24764f74dcc1SBrett Creeley } 24774f74dcc1SBrett Creeley 247842f3efefSBrett Creeley /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged 247942f3efefSBrett Creeley * packets aren't pruned by the device's internal switch on Rx 2480d76a60baSAnirudh Venkataramanan */ 24815eda8afdSAkeem G Abodunrin ret = ice_vsi_add_vlan(vsi, vid); 24825eda8afdSAkeem G Abodunrin if (!ret) { 24835eda8afdSAkeem G Abodunrin vsi->vlan_ena = true; 24845eda8afdSAkeem G Abodunrin set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); 24855eda8afdSAkeem G Abodunrin } 24865eda8afdSAkeem G Abodunrin 24875eda8afdSAkeem G Abodunrin return ret; 2488d76a60baSAnirudh Venkataramanan } 2489d76a60baSAnirudh Venkataramanan 2490d76a60baSAnirudh Venkataramanan /** 2491f9867df6SAnirudh Venkataramanan * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload 2492d76a60baSAnirudh Venkataramanan * @netdev: network interface to be adjusted 2493d76a60baSAnirudh Venkataramanan * @proto: unused protocol 2494f9867df6SAnirudh Venkataramanan * @vid: VLAN ID to be removed 2495d76a60baSAnirudh Venkataramanan * 2496f9867df6SAnirudh Venkataramanan * net_device_ops implementation for removing VLAN IDs 2497d76a60baSAnirudh Venkataramanan */ 2498c8b7abddSBruce Allan static int 2499c8b7abddSBruce Allan ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, 2500c8b7abddSBruce Allan u16 vid) 2501d76a60baSAnirudh Venkataramanan { 2502d76a60baSAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 2503d76a60baSAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 25045eda8afdSAkeem G Abodunrin int ret; 2505d76a60baSAnirudh Venkataramanan 2506d76a60baSAnirudh 
Venkataramanan if (vsi->info.pvid) 2507d76a60baSAnirudh Venkataramanan return -EINVAL; 2508d76a60baSAnirudh Venkataramanan 250942f3efefSBrett Creeley /* don't allow removal of VLAN 0 */ 251042f3efefSBrett Creeley if (!vid) 251142f3efefSBrett Creeley return 0; 251242f3efefSBrett Creeley 25134f74dcc1SBrett Creeley /* Make sure ice_vsi_kill_vlan is successful before updating VLAN 25144f74dcc1SBrett Creeley * information 2515d76a60baSAnirudh Venkataramanan */ 25165eda8afdSAkeem G Abodunrin ret = ice_vsi_kill_vlan(vsi, vid); 25175eda8afdSAkeem G Abodunrin if (ret) 25185eda8afdSAkeem G Abodunrin return ret; 2519d76a60baSAnirudh Venkataramanan 252042f3efefSBrett Creeley /* Disable pruning when VLAN 0 is the only VLAN rule */ 252142f3efefSBrett Creeley if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) 25225eda8afdSAkeem G Abodunrin ret = ice_cfg_vlan_pruning(vsi, false, false); 25234f74dcc1SBrett Creeley 25245eda8afdSAkeem G Abodunrin vsi->vlan_ena = false; 25255eda8afdSAkeem G Abodunrin set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); 25265eda8afdSAkeem G Abodunrin return ret; 2527d76a60baSAnirudh Venkataramanan } 2528d76a60baSAnirudh Venkataramanan 2529d76a60baSAnirudh Venkataramanan /** 25303a858ba3SAnirudh Venkataramanan * ice_setup_pf_sw - Setup the HW switch on startup or after reset 25313a858ba3SAnirudh Venkataramanan * @pf: board private structure 25323a858ba3SAnirudh Venkataramanan * 25333a858ba3SAnirudh Venkataramanan * Returns 0 on success, negative value on failure 25343a858ba3SAnirudh Venkataramanan */ 25353a858ba3SAnirudh Venkataramanan static int ice_setup_pf_sw(struct ice_pf *pf) 25363a858ba3SAnirudh Venkataramanan { 25373a858ba3SAnirudh Venkataramanan struct ice_vsi *vsi; 25383a858ba3SAnirudh Venkataramanan int status = 0; 25393a858ba3SAnirudh Venkataramanan 25405df7e45dSDave Ertman if (ice_is_reset_in_progress(pf->state)) 25410f9d5027SAnirudh Venkataramanan return -EBUSY; 25420f9d5027SAnirudh Venkataramanan 25430f9d5027SAnirudh 
Venkataramanan vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 25443a858ba3SAnirudh Venkataramanan if (!vsi) { 25453a858ba3SAnirudh Venkataramanan status = -ENOMEM; 25460f9d5027SAnirudh Venkataramanan goto unroll_vsi_setup; 25470b28b702SAnirudh Venkataramanan } 25483a858ba3SAnirudh Venkataramanan 2549df0f8479SAnirudh Venkataramanan status = ice_cfg_netdev(vsi); 2550df0f8479SAnirudh Venkataramanan if (status) { 2551df0f8479SAnirudh Venkataramanan status = -ENODEV; 2552df0f8479SAnirudh Venkataramanan goto unroll_vsi_setup; 2553df0f8479SAnirudh Venkataramanan } 2554efc2214bSMaciej Fijalkowski /* netdev has to be configured before setting frame size */ 2555efc2214bSMaciej Fijalkowski ice_vsi_cfg_frame_size(vsi); 2556df0f8479SAnirudh Venkataramanan 2557b94b013eSDave Ertman /* Setup DCB netlink interface */ 2558b94b013eSDave Ertman ice_dcbnl_setup(vsi); 2559b94b013eSDave Ertman 2560df0f8479SAnirudh Venkataramanan /* registering the NAPI handler requires both the queues and 2561df0f8479SAnirudh Venkataramanan * netdev to be created, which are done in ice_pf_vsi_setup() 2562df0f8479SAnirudh Venkataramanan * and ice_cfg_netdev() respectively 2563df0f8479SAnirudh Venkataramanan */ 2564df0f8479SAnirudh Venkataramanan ice_napi_add(vsi); 2565df0f8479SAnirudh Venkataramanan 2566561f4379STony Nguyen status = ice_init_mac_fltr(pf); 25679daf8208SAnirudh Venkataramanan if (status) 2568df0f8479SAnirudh Venkataramanan goto unroll_napi_add; 25699daf8208SAnirudh Venkataramanan 25709daf8208SAnirudh Venkataramanan return status; 25719daf8208SAnirudh Venkataramanan 2572df0f8479SAnirudh Venkataramanan unroll_napi_add: 25733a858ba3SAnirudh Venkataramanan if (vsi) { 2574df0f8479SAnirudh Venkataramanan ice_napi_del(vsi); 25753a858ba3SAnirudh Venkataramanan if (vsi->netdev) { 2576df0f8479SAnirudh Venkataramanan if (vsi->netdev->reg_state == NETREG_REGISTERED) 2577df0f8479SAnirudh Venkataramanan unregister_netdev(vsi->netdev); 25783a858ba3SAnirudh Venkataramanan free_netdev(vsi->netdev); 
25793a858ba3SAnirudh Venkataramanan vsi->netdev = NULL; 25803a858ba3SAnirudh Venkataramanan } 2581df0f8479SAnirudh Venkataramanan } 25829daf8208SAnirudh Venkataramanan 2583df0f8479SAnirudh Venkataramanan unroll_vsi_setup: 2584df0f8479SAnirudh Venkataramanan if (vsi) { 2585df0f8479SAnirudh Venkataramanan ice_vsi_free_q_vectors(vsi); 25863a858ba3SAnirudh Venkataramanan ice_vsi_delete(vsi); 25873a858ba3SAnirudh Venkataramanan ice_vsi_put_qs(vsi); 25883a858ba3SAnirudh Venkataramanan ice_vsi_clear(vsi); 25893a858ba3SAnirudh Venkataramanan } 25903a858ba3SAnirudh Venkataramanan return status; 25913a858ba3SAnirudh Venkataramanan } 25923a858ba3SAnirudh Venkataramanan 25933a858ba3SAnirudh Venkataramanan /** 25948c243700SAnirudh Venkataramanan * ice_get_avail_q_count - Get count of queues in use 25958c243700SAnirudh Venkataramanan * @pf_qmap: bitmap to get queue use count from 25968c243700SAnirudh Venkataramanan * @lock: pointer to a mutex that protects access to pf_qmap 25978c243700SAnirudh Venkataramanan * @size: size of the bitmap 2598940b61afSAnirudh Venkataramanan */ 25998c243700SAnirudh Venkataramanan static u16 26008c243700SAnirudh Venkataramanan ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 2601940b61afSAnirudh Venkataramanan { 26028c243700SAnirudh Venkataramanan u16 count = 0, bit; 2603940b61afSAnirudh Venkataramanan 26048c243700SAnirudh Venkataramanan mutex_lock(lock); 26058c243700SAnirudh Venkataramanan for_each_clear_bit(bit, pf_qmap, size) 26068c243700SAnirudh Venkataramanan count++; 26078c243700SAnirudh Venkataramanan mutex_unlock(lock); 2608940b61afSAnirudh Venkataramanan 26098c243700SAnirudh Venkataramanan return count; 26108c243700SAnirudh Venkataramanan } 2611d76a60baSAnirudh Venkataramanan 26128c243700SAnirudh Venkataramanan /** 26138c243700SAnirudh Venkataramanan * ice_get_avail_txq_count - Get count of Tx queues in use 26148c243700SAnirudh Venkataramanan * @pf: pointer to an ice_pf instance 26158c243700SAnirudh Venkataramanan 
*/ 26168c243700SAnirudh Venkataramanan u16 ice_get_avail_txq_count(struct ice_pf *pf) 26178c243700SAnirudh Venkataramanan { 26188c243700SAnirudh Venkataramanan return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 26198c243700SAnirudh Venkataramanan pf->max_pf_txqs); 26208c243700SAnirudh Venkataramanan } 2621940b61afSAnirudh Venkataramanan 26228c243700SAnirudh Venkataramanan /** 26238c243700SAnirudh Venkataramanan * ice_get_avail_rxq_count - Get count of Rx queues in use 26248c243700SAnirudh Venkataramanan * @pf: pointer to an ice_pf instance 26258c243700SAnirudh Venkataramanan */ 26268c243700SAnirudh Venkataramanan u16 ice_get_avail_rxq_count(struct ice_pf *pf) 26278c243700SAnirudh Venkataramanan { 26288c243700SAnirudh Venkataramanan return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 26298c243700SAnirudh Venkataramanan pf->max_pf_rxqs); 2630940b61afSAnirudh Venkataramanan } 2631940b61afSAnirudh Venkataramanan 2632940b61afSAnirudh Venkataramanan /** 2633940b61afSAnirudh Venkataramanan * ice_deinit_pf - Unrolls initialziations done by ice_init_pf 2634940b61afSAnirudh Venkataramanan * @pf: board private structure to initialize 2635940b61afSAnirudh Venkataramanan */ 2636940b61afSAnirudh Venkataramanan static void ice_deinit_pf(struct ice_pf *pf) 2637940b61afSAnirudh Venkataramanan { 26388d81fa55SAkeem G Abodunrin ice_service_task_stop(pf); 2639940b61afSAnirudh Venkataramanan mutex_destroy(&pf->sw_mutex); 2640b94b013eSDave Ertman mutex_destroy(&pf->tc_mutex); 2641940b61afSAnirudh Venkataramanan mutex_destroy(&pf->avail_q_mutex); 264278b5713aSAnirudh Venkataramanan 264378b5713aSAnirudh Venkataramanan if (pf->avail_txqs) { 264478b5713aSAnirudh Venkataramanan bitmap_free(pf->avail_txqs); 264578b5713aSAnirudh Venkataramanan pf->avail_txqs = NULL; 264678b5713aSAnirudh Venkataramanan } 264778b5713aSAnirudh Venkataramanan 264878b5713aSAnirudh Venkataramanan if (pf->avail_rxqs) { 264978b5713aSAnirudh Venkataramanan bitmap_free(pf->avail_rxqs); 
265078b5713aSAnirudh Venkataramanan pf->avail_rxqs = NULL; 265178b5713aSAnirudh Venkataramanan } 2652940b61afSAnirudh Venkataramanan } 2653940b61afSAnirudh Venkataramanan 2654940b61afSAnirudh Venkataramanan /** 2655462acf6aSTony Nguyen * ice_set_pf_caps - set PFs capability flags 2656462acf6aSTony Nguyen * @pf: pointer to the PF instance 2657462acf6aSTony Nguyen */ 2658462acf6aSTony Nguyen static void ice_set_pf_caps(struct ice_pf *pf) 2659462acf6aSTony Nguyen { 2660462acf6aSTony Nguyen struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 2661462acf6aSTony Nguyen 2662462acf6aSTony Nguyen clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 2663462acf6aSTony Nguyen if (func_caps->common_cap.dcb) 2664462acf6aSTony Nguyen set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 2665462acf6aSTony Nguyen clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 2666462acf6aSTony Nguyen if (func_caps->common_cap.sr_iov_1_1) { 2667462acf6aSTony Nguyen set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 2668462acf6aSTony Nguyen pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, 2669462acf6aSTony Nguyen ICE_MAX_VF_COUNT); 2670462acf6aSTony Nguyen } 2671462acf6aSTony Nguyen clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 2672462acf6aSTony Nguyen if (func_caps->common_cap.rss_table_size) 2673462acf6aSTony Nguyen set_bit(ICE_FLAG_RSS_ENA, pf->flags); 2674462acf6aSTony Nguyen 2675462acf6aSTony Nguyen pf->max_pf_txqs = func_caps->common_cap.num_txq; 2676462acf6aSTony Nguyen pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 2677462acf6aSTony Nguyen } 2678462acf6aSTony Nguyen 2679462acf6aSTony Nguyen /** 2680940b61afSAnirudh Venkataramanan * ice_init_pf - Initialize general software structures (struct ice_pf) 2681940b61afSAnirudh Venkataramanan * @pf: board private structure to initialize 2682940b61afSAnirudh Venkataramanan */ 268378b5713aSAnirudh Venkataramanan static int ice_init_pf(struct ice_pf *pf) 2684940b61afSAnirudh Venkataramanan { 2685462acf6aSTony Nguyen ice_set_pf_caps(pf); 2686940b61afSAnirudh 
Venkataramanan 2687940b61afSAnirudh Venkataramanan mutex_init(&pf->sw_mutex); 2688b94b013eSDave Ertman mutex_init(&pf->tc_mutex); 2689d76a60baSAnirudh Venkataramanan 2690940b61afSAnirudh Venkataramanan /* setup service timer and periodic service task */ 2691940b61afSAnirudh Venkataramanan timer_setup(&pf->serv_tmr, ice_service_timer, 0); 2692940b61afSAnirudh Venkataramanan pf->serv_tmr_period = HZ; 2693940b61afSAnirudh Venkataramanan INIT_WORK(&pf->serv_task, ice_service_task); 2694940b61afSAnirudh Venkataramanan clear_bit(__ICE_SERVICE_SCHED, pf->state); 269578b5713aSAnirudh Venkataramanan 2696462acf6aSTony Nguyen mutex_init(&pf->avail_q_mutex); 269778b5713aSAnirudh Venkataramanan pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 269878b5713aSAnirudh Venkataramanan if (!pf->avail_txqs) 269978b5713aSAnirudh Venkataramanan return -ENOMEM; 270078b5713aSAnirudh Venkataramanan 270178b5713aSAnirudh Venkataramanan pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 270278b5713aSAnirudh Venkataramanan if (!pf->avail_rxqs) { 27034015d11eSBrett Creeley devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); 270478b5713aSAnirudh Venkataramanan pf->avail_txqs = NULL; 270578b5713aSAnirudh Venkataramanan return -ENOMEM; 270678b5713aSAnirudh Venkataramanan } 270778b5713aSAnirudh Venkataramanan 270878b5713aSAnirudh Venkataramanan return 0; 2709940b61afSAnirudh Venkataramanan } 2710940b61afSAnirudh Venkataramanan 2711940b61afSAnirudh Venkataramanan /** 2712940b61afSAnirudh Venkataramanan * ice_ena_msix_range - Request a range of MSIX vectors from the OS 2713940b61afSAnirudh Venkataramanan * @pf: board private structure 2714940b61afSAnirudh Venkataramanan * 2715940b61afSAnirudh Venkataramanan * compute the number of MSIX vectors required (v_budget) and request from 2716940b61afSAnirudh Venkataramanan * the OS. 
Return the number of vectors reserved or negative on failure 2717940b61afSAnirudh Venkataramanan */ 2718940b61afSAnirudh Venkataramanan static int ice_ena_msix_range(struct ice_pf *pf) 2719940b61afSAnirudh Venkataramanan { 27204015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 2721940b61afSAnirudh Venkataramanan int v_left, v_actual, v_budget = 0; 2722940b61afSAnirudh Venkataramanan int needed, err, i; 2723940b61afSAnirudh Venkataramanan 2724940b61afSAnirudh Venkataramanan v_left = pf->hw.func_caps.common_cap.num_msix_vectors; 2725940b61afSAnirudh Venkataramanan 2726940b61afSAnirudh Venkataramanan /* reserve one vector for miscellaneous handler */ 2727940b61afSAnirudh Venkataramanan needed = 1; 2728152b978aSAnirudh Venkataramanan if (v_left < needed) 2729152b978aSAnirudh Venkataramanan goto no_hw_vecs_left_err; 2730940b61afSAnirudh Venkataramanan v_budget += needed; 2731940b61afSAnirudh Venkataramanan v_left -= needed; 2732940b61afSAnirudh Venkataramanan 2733940b61afSAnirudh Venkataramanan /* reserve vectors for LAN traffic */ 2734152b978aSAnirudh Venkataramanan needed = min_t(int, num_online_cpus(), v_left); 2735152b978aSAnirudh Venkataramanan if (v_left < needed) 2736152b978aSAnirudh Venkataramanan goto no_hw_vecs_left_err; 2737152b978aSAnirudh Venkataramanan pf->num_lan_msix = needed; 2738152b978aSAnirudh Venkataramanan v_budget += needed; 2739152b978aSAnirudh Venkataramanan v_left -= needed; 2740940b61afSAnirudh Venkataramanan 27414015d11eSBrett Creeley pf->msix_entries = devm_kcalloc(dev, v_budget, 2742c6dfd690SBruce Allan sizeof(*pf->msix_entries), GFP_KERNEL); 2743940b61afSAnirudh Venkataramanan 2744940b61afSAnirudh Venkataramanan if (!pf->msix_entries) { 2745940b61afSAnirudh Venkataramanan err = -ENOMEM; 2746940b61afSAnirudh Venkataramanan goto exit_err; 2747940b61afSAnirudh Venkataramanan } 2748940b61afSAnirudh Venkataramanan 2749940b61afSAnirudh Venkataramanan for (i = 0; i < v_budget; i++) 2750940b61afSAnirudh Venkataramanan 
pf->msix_entries[i].entry = i; 2751940b61afSAnirudh Venkataramanan 2752940b61afSAnirudh Venkataramanan /* actually reserve the vectors */ 2753940b61afSAnirudh Venkataramanan v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, 2754940b61afSAnirudh Venkataramanan ICE_MIN_MSIX, v_budget); 2755940b61afSAnirudh Venkataramanan 2756940b61afSAnirudh Venkataramanan if (v_actual < 0) { 27574015d11eSBrett Creeley dev_err(dev, "unable to reserve MSI-X vectors\n"); 2758940b61afSAnirudh Venkataramanan err = v_actual; 2759940b61afSAnirudh Venkataramanan goto msix_err; 2760940b61afSAnirudh Venkataramanan } 2761940b61afSAnirudh Venkataramanan 2762940b61afSAnirudh Venkataramanan if (v_actual < v_budget) { 276319cce2c6SAnirudh Venkataramanan dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", 2764940b61afSAnirudh Venkataramanan v_budget, v_actual); 2765152b978aSAnirudh Venkataramanan /* 2 vectors for LAN (traffic + OICR) */ 2766152b978aSAnirudh Venkataramanan #define ICE_MIN_LAN_VECS 2 2767152b978aSAnirudh Venkataramanan 2768152b978aSAnirudh Venkataramanan if (v_actual < ICE_MIN_LAN_VECS) { 2769152b978aSAnirudh Venkataramanan /* error if we can't get minimum vectors */ 2770940b61afSAnirudh Venkataramanan pci_disable_msix(pf->pdev); 2771940b61afSAnirudh Venkataramanan err = -ERANGE; 2772940b61afSAnirudh Venkataramanan goto msix_err; 2773152b978aSAnirudh Venkataramanan } else { 2774152b978aSAnirudh Venkataramanan pf->num_lan_msix = ICE_MIN_LAN_VECS; 2775940b61afSAnirudh Venkataramanan } 2776940b61afSAnirudh Venkataramanan } 2777940b61afSAnirudh Venkataramanan 2778940b61afSAnirudh Venkataramanan return v_actual; 2779940b61afSAnirudh Venkataramanan 2780940b61afSAnirudh Venkataramanan msix_err: 27814015d11eSBrett Creeley devm_kfree(dev, pf->msix_entries); 2782940b61afSAnirudh Venkataramanan goto exit_err; 2783940b61afSAnirudh Venkataramanan 2784152b978aSAnirudh Venkataramanan no_hw_vecs_left_err: 278519cce2c6SAnirudh Venkataramanan dev_err(dev, "not 
enough device MSI-X vectors. requested = %d, available = %d\n", 2786152b978aSAnirudh Venkataramanan needed, v_left); 2787152b978aSAnirudh Venkataramanan err = -ERANGE; 2788940b61afSAnirudh Venkataramanan exit_err: 2789940b61afSAnirudh Venkataramanan pf->num_lan_msix = 0; 2790940b61afSAnirudh Venkataramanan return err; 2791940b61afSAnirudh Venkataramanan } 2792940b61afSAnirudh Venkataramanan 2793940b61afSAnirudh Venkataramanan /** 2794940b61afSAnirudh Venkataramanan * ice_dis_msix - Disable MSI-X interrupt setup in OS 2795940b61afSAnirudh Venkataramanan * @pf: board private structure 2796940b61afSAnirudh Venkataramanan */ 2797940b61afSAnirudh Venkataramanan static void ice_dis_msix(struct ice_pf *pf) 2798940b61afSAnirudh Venkataramanan { 2799940b61afSAnirudh Venkataramanan pci_disable_msix(pf->pdev); 28004015d11eSBrett Creeley devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); 2801940b61afSAnirudh Venkataramanan pf->msix_entries = NULL; 2802940b61afSAnirudh Venkataramanan } 2803940b61afSAnirudh Venkataramanan 2804940b61afSAnirudh Venkataramanan /** 2805eb0208ecSPreethi Banala * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme 2806eb0208ecSPreethi Banala * @pf: board private structure 2807eb0208ecSPreethi Banala */ 2808eb0208ecSPreethi Banala static void ice_clear_interrupt_scheme(struct ice_pf *pf) 2809eb0208ecSPreethi Banala { 2810eb0208ecSPreethi Banala ice_dis_msix(pf); 2811eb0208ecSPreethi Banala 2812cbe66bfeSBrett Creeley if (pf->irq_tracker) { 28134015d11eSBrett Creeley devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); 2814cbe66bfeSBrett Creeley pf->irq_tracker = NULL; 2815eb0208ecSPreethi Banala } 2816eb0208ecSPreethi Banala } 2817eb0208ecSPreethi Banala 2818eb0208ecSPreethi Banala /** 2819940b61afSAnirudh Venkataramanan * ice_init_interrupt_scheme - Determine proper interrupt scheme 2820940b61afSAnirudh Venkataramanan * @pf: board private structure to initialize 2821940b61afSAnirudh Venkataramanan */ 2822940b61afSAnirudh Venkataramanan 
static int ice_init_interrupt_scheme(struct ice_pf *pf) 2823940b61afSAnirudh Venkataramanan { 2824cbe66bfeSBrett Creeley int vectors; 2825940b61afSAnirudh Venkataramanan 2826940b61afSAnirudh Venkataramanan vectors = ice_ena_msix_range(pf); 2827940b61afSAnirudh Venkataramanan 2828940b61afSAnirudh Venkataramanan if (vectors < 0) 2829940b61afSAnirudh Venkataramanan return vectors; 2830940b61afSAnirudh Venkataramanan 2831940b61afSAnirudh Venkataramanan /* set up vector assignment tracking */ 2832cbe66bfeSBrett Creeley pf->irq_tracker = 28334015d11eSBrett Creeley devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) + 2834c6dfd690SBruce Allan (sizeof(u16) * vectors), GFP_KERNEL); 2835cbe66bfeSBrett Creeley if (!pf->irq_tracker) { 2836940b61afSAnirudh Venkataramanan ice_dis_msix(pf); 2837940b61afSAnirudh Venkataramanan return -ENOMEM; 2838940b61afSAnirudh Venkataramanan } 2839940b61afSAnirudh Venkataramanan 2840eb0208ecSPreethi Banala /* populate SW interrupts pool with number of OS granted IRQs. */ 2841eb0208ecSPreethi Banala pf->num_avail_sw_msix = vectors; 2842cbe66bfeSBrett Creeley pf->irq_tracker->num_entries = vectors; 2843cbe66bfeSBrett Creeley pf->irq_tracker->end = pf->irq_tracker->num_entries; 2844940b61afSAnirudh Venkataramanan 2845940b61afSAnirudh Venkataramanan return 0; 2846940b61afSAnirudh Venkataramanan } 2847940b61afSAnirudh Venkataramanan 2848940b61afSAnirudh Venkataramanan /** 284987324e74SHenry Tieman * ice_vsi_recfg_qs - Change the number of queues on a VSI 285087324e74SHenry Tieman * @vsi: VSI being changed 285187324e74SHenry Tieman * @new_rx: new number of Rx queues 285287324e74SHenry Tieman * @new_tx: new number of Tx queues 285387324e74SHenry Tieman * 285487324e74SHenry Tieman * Only change the number of queues if new_tx, or new_rx is non-0. 285587324e74SHenry Tieman * 285687324e74SHenry Tieman * Returns 0 on success. 
285787324e74SHenry Tieman */ 285887324e74SHenry Tieman int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) 285987324e74SHenry Tieman { 286087324e74SHenry Tieman struct ice_pf *pf = vsi->back; 286187324e74SHenry Tieman int err = 0, timeout = 50; 286287324e74SHenry Tieman 286387324e74SHenry Tieman if (!new_rx && !new_tx) 286487324e74SHenry Tieman return -EINVAL; 286587324e74SHenry Tieman 286687324e74SHenry Tieman while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { 286787324e74SHenry Tieman timeout--; 286887324e74SHenry Tieman if (!timeout) 286987324e74SHenry Tieman return -EBUSY; 287087324e74SHenry Tieman usleep_range(1000, 2000); 287187324e74SHenry Tieman } 287287324e74SHenry Tieman 287387324e74SHenry Tieman if (new_tx) 287487324e74SHenry Tieman vsi->req_txq = new_tx; 287587324e74SHenry Tieman if (new_rx) 287687324e74SHenry Tieman vsi->req_rxq = new_rx; 287787324e74SHenry Tieman 287887324e74SHenry Tieman /* set for the next time the netdev is started */ 287987324e74SHenry Tieman if (!netif_running(vsi->netdev)) { 288087324e74SHenry Tieman ice_vsi_rebuild(vsi, false); 288187324e74SHenry Tieman dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 288287324e74SHenry Tieman goto done; 288387324e74SHenry Tieman } 288487324e74SHenry Tieman 288587324e74SHenry Tieman ice_vsi_close(vsi); 288687324e74SHenry Tieman ice_vsi_rebuild(vsi, false); 288787324e74SHenry Tieman ice_pf_dcb_recfg(pf); 288887324e74SHenry Tieman ice_vsi_open(vsi); 288987324e74SHenry Tieman done: 289087324e74SHenry Tieman clear_bit(__ICE_CFG_BUSY, pf->state); 289187324e74SHenry Tieman return err; 289287324e74SHenry Tieman } 289387324e74SHenry Tieman 289487324e74SHenry Tieman /** 2895462acf6aSTony Nguyen * ice_log_pkg_init - log result of DDP package load 2896462acf6aSTony Nguyen * @hw: pointer to hardware info 2897462acf6aSTony Nguyen * @status: status of package load 2898462acf6aSTony Nguyen */ 2899462acf6aSTony Nguyen static void 
2900462acf6aSTony Nguyen ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) 2901462acf6aSTony Nguyen { 2902462acf6aSTony Nguyen struct ice_pf *pf = (struct ice_pf *)hw->back; 29034015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 2904462acf6aSTony Nguyen 2905462acf6aSTony Nguyen switch (*status) { 2906462acf6aSTony Nguyen case ICE_SUCCESS: 2907462acf6aSTony Nguyen /* The package download AdminQ command returned success because 2908462acf6aSTony Nguyen * this download succeeded or ICE_ERR_AQ_NO_WORK since there is 2909462acf6aSTony Nguyen * already a package loaded on the device. 2910462acf6aSTony Nguyen */ 2911462acf6aSTony Nguyen if (hw->pkg_ver.major == hw->active_pkg_ver.major && 2912462acf6aSTony Nguyen hw->pkg_ver.minor == hw->active_pkg_ver.minor && 2913462acf6aSTony Nguyen hw->pkg_ver.update == hw->active_pkg_ver.update && 2914462acf6aSTony Nguyen hw->pkg_ver.draft == hw->active_pkg_ver.draft && 2915462acf6aSTony Nguyen !memcmp(hw->pkg_name, hw->active_pkg_name, 2916462acf6aSTony Nguyen sizeof(hw->pkg_name))) { 2917462acf6aSTony Nguyen if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) 291819cce2c6SAnirudh Venkataramanan dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 2919462acf6aSTony Nguyen hw->active_pkg_name, 2920462acf6aSTony Nguyen hw->active_pkg_ver.major, 2921462acf6aSTony Nguyen hw->active_pkg_ver.minor, 2922462acf6aSTony Nguyen hw->active_pkg_ver.update, 2923462acf6aSTony Nguyen hw->active_pkg_ver.draft); 2924462acf6aSTony Nguyen else 292519cce2c6SAnirudh Venkataramanan dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 2926462acf6aSTony Nguyen hw->active_pkg_name, 2927462acf6aSTony Nguyen hw->active_pkg_ver.major, 2928462acf6aSTony Nguyen hw->active_pkg_ver.minor, 2929462acf6aSTony Nguyen hw->active_pkg_ver.update, 2930462acf6aSTony Nguyen hw->active_pkg_ver.draft); 2931462acf6aSTony Nguyen } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || 
2932462acf6aSTony Nguyen hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { 293319cce2c6SAnirudh Venkataramanan dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 2934462acf6aSTony Nguyen hw->active_pkg_name, 2935462acf6aSTony Nguyen hw->active_pkg_ver.major, 2936462acf6aSTony Nguyen hw->active_pkg_ver.minor, 2937462acf6aSTony Nguyen ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 2938462acf6aSTony Nguyen *status = ICE_ERR_NOT_SUPPORTED; 2939462acf6aSTony Nguyen } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 2940462acf6aSTony Nguyen hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { 294119cce2c6SAnirudh Venkataramanan dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 2942462acf6aSTony Nguyen hw->active_pkg_name, 2943462acf6aSTony Nguyen hw->active_pkg_ver.major, 2944462acf6aSTony Nguyen hw->active_pkg_ver.minor, 2945462acf6aSTony Nguyen hw->active_pkg_ver.update, 2946462acf6aSTony Nguyen hw->active_pkg_ver.draft, 2947462acf6aSTony Nguyen hw->pkg_name, 2948462acf6aSTony Nguyen hw->pkg_ver.major, 2949462acf6aSTony Nguyen hw->pkg_ver.minor, 2950462acf6aSTony Nguyen hw->pkg_ver.update, 2951462acf6aSTony Nguyen hw->pkg_ver.draft); 2952462acf6aSTony Nguyen } else { 295319cce2c6SAnirudh Venkataramanan dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. 
Entering Safe Mode.\n"); 2954462acf6aSTony Nguyen *status = ICE_ERR_NOT_SUPPORTED; 2955462acf6aSTony Nguyen } 2956462acf6aSTony Nguyen break; 2957462acf6aSTony Nguyen case ICE_ERR_BUF_TOO_SHORT: 2958462acf6aSTony Nguyen /* fall-through */ 2959462acf6aSTony Nguyen case ICE_ERR_CFG: 296019cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); 2961462acf6aSTony Nguyen break; 2962462acf6aSTony Nguyen case ICE_ERR_NOT_SUPPORTED: 2963462acf6aSTony Nguyen /* Package File version not supported */ 2964462acf6aSTony Nguyen if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || 2965462acf6aSTony Nguyen (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 2966462acf6aSTony Nguyen hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) 296719cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 2968462acf6aSTony Nguyen else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || 2969462acf6aSTony Nguyen (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 2970462acf6aSTony Nguyen hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) 297119cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 2972462acf6aSTony Nguyen ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 2973462acf6aSTony Nguyen break; 2974462acf6aSTony Nguyen case ICE_ERR_AQ_ERROR: 2975e000248eSBruce Allan switch (hw->pkg_dwnld_status) { 2976462acf6aSTony Nguyen case ICE_AQ_RC_ENOSEC: 2977462acf6aSTony Nguyen case ICE_AQ_RC_EBADSIG: 297819cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. 
Entering Safe Mode.\n"); 2979462acf6aSTony Nguyen return; 2980462acf6aSTony Nguyen case ICE_AQ_RC_ESVN: 298119cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 2982462acf6aSTony Nguyen return; 2983462acf6aSTony Nguyen case ICE_AQ_RC_EBADMAN: 2984462acf6aSTony Nguyen case ICE_AQ_RC_EBADBUF: 298519cce2c6SAnirudh Venkataramanan dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 2986462acf6aSTony Nguyen return; 2987462acf6aSTony Nguyen default: 2988462acf6aSTony Nguyen break; 2989462acf6aSTony Nguyen } 2990462acf6aSTony Nguyen /* fall-through */ 2991462acf6aSTony Nguyen default: 299219cce2c6SAnirudh Venkataramanan dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", 2993462acf6aSTony Nguyen *status); 2994462acf6aSTony Nguyen break; 2995462acf6aSTony Nguyen } 2996462acf6aSTony Nguyen } 2997462acf6aSTony Nguyen 2998462acf6aSTony Nguyen /** 2999462acf6aSTony Nguyen * ice_load_pkg - load/reload the DDP Package file 3000462acf6aSTony Nguyen * @firmware: firmware structure when firmware requested or NULL for reload 3001462acf6aSTony Nguyen * @pf: pointer to the PF instance 3002462acf6aSTony Nguyen * 3003462acf6aSTony Nguyen * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 3004462acf6aSTony Nguyen * initialize HW tables. 
3005462acf6aSTony Nguyen */ 3006462acf6aSTony Nguyen static void 3007462acf6aSTony Nguyen ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 3008462acf6aSTony Nguyen { 3009462acf6aSTony Nguyen enum ice_status status = ICE_ERR_PARAM; 30104015d11eSBrett Creeley struct device *dev = ice_pf_to_dev(pf); 3011462acf6aSTony Nguyen struct ice_hw *hw = &pf->hw; 3012462acf6aSTony Nguyen 3013462acf6aSTony Nguyen /* Load DDP Package */ 3014462acf6aSTony Nguyen if (firmware && !hw->pkg_copy) { 3015462acf6aSTony Nguyen status = ice_copy_and_init_pkg(hw, firmware->data, 3016462acf6aSTony Nguyen firmware->size); 3017462acf6aSTony Nguyen ice_log_pkg_init(hw, &status); 3018462acf6aSTony Nguyen } else if (!firmware && hw->pkg_copy) { 3019462acf6aSTony Nguyen /* Reload package during rebuild after CORER/GLOBR reset */ 3020462acf6aSTony Nguyen status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 3021462acf6aSTony Nguyen ice_log_pkg_init(hw, &status); 3022462acf6aSTony Nguyen } else { 302319cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file failed to load. 
Entering Safe Mode.\n"); 3024462acf6aSTony Nguyen } 3025462acf6aSTony Nguyen 3026462acf6aSTony Nguyen if (status) { 3027462acf6aSTony Nguyen /* Safe Mode */ 3028462acf6aSTony Nguyen clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 3029462acf6aSTony Nguyen return; 3030462acf6aSTony Nguyen } 3031462acf6aSTony Nguyen 3032462acf6aSTony Nguyen /* Successful download package is the precondition for advanced 3033462acf6aSTony Nguyen * features, hence setting the ICE_FLAG_ADV_FEATURES flag 3034462acf6aSTony Nguyen */ 3035462acf6aSTony Nguyen set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 3036462acf6aSTony Nguyen } 3037462acf6aSTony Nguyen 3038462acf6aSTony Nguyen /** 3039c585ea42SBrett Creeley * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 3040c585ea42SBrett Creeley * @pf: pointer to the PF structure 3041c585ea42SBrett Creeley * 3042c585ea42SBrett Creeley * There is no error returned here because the driver should be able to handle 3043c585ea42SBrett Creeley * 128 Byte cache lines, so we only print a warning in case issues are seen, 3044c585ea42SBrett Creeley * specifically with Tx. 
3045c585ea42SBrett Creeley */ 3046c585ea42SBrett Creeley static void ice_verify_cacheline_size(struct ice_pf *pf) 3047c585ea42SBrett Creeley { 3048c585ea42SBrett Creeley if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 304919cce2c6SAnirudh Venkataramanan dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 3050c585ea42SBrett Creeley ICE_CACHE_LINE_BYTES); 3051c585ea42SBrett Creeley } 3052c585ea42SBrett Creeley 3053c585ea42SBrett Creeley /** 3054e3710a01SPaul M Stillwell Jr * ice_send_version - update firmware with driver version 3055e3710a01SPaul M Stillwell Jr * @pf: PF struct 3056e3710a01SPaul M Stillwell Jr * 3057e3710a01SPaul M Stillwell Jr * Returns ICE_SUCCESS on success, else error code 3058e3710a01SPaul M Stillwell Jr */ 3059e3710a01SPaul M Stillwell Jr static enum ice_status ice_send_version(struct ice_pf *pf) 3060e3710a01SPaul M Stillwell Jr { 3061e3710a01SPaul M Stillwell Jr struct ice_driver_ver dv; 3062e3710a01SPaul M Stillwell Jr 3063e3710a01SPaul M Stillwell Jr dv.major_ver = DRV_VERSION_MAJOR; 3064e3710a01SPaul M Stillwell Jr dv.minor_ver = DRV_VERSION_MINOR; 3065e3710a01SPaul M Stillwell Jr dv.build_ver = DRV_VERSION_BUILD; 3066e3710a01SPaul M Stillwell Jr dv.subbuild_ver = 0; 3067e3710a01SPaul M Stillwell Jr strscpy((char *)dv.driver_string, DRV_VERSION, 3068e3710a01SPaul M Stillwell Jr sizeof(dv.driver_string)); 3069e3710a01SPaul M Stillwell Jr return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 3070e3710a01SPaul M Stillwell Jr } 3071e3710a01SPaul M Stillwell Jr 3072e3710a01SPaul M Stillwell Jr /** 3073462acf6aSTony Nguyen * ice_get_opt_fw_name - return optional firmware file name or NULL 3074462acf6aSTony Nguyen * @pf: pointer to the PF instance 3075462acf6aSTony Nguyen */ 3076462acf6aSTony Nguyen static char *ice_get_opt_fw_name(struct ice_pf *pf) 3077462acf6aSTony Nguyen { 3078462acf6aSTony Nguyen /* Optional firmware name same as default with additional dash 3079462acf6aSTony 
Nguyen * followed by a EUI-64 identifier (PCIe Device Serial Number) 3080462acf6aSTony Nguyen */ 3081462acf6aSTony Nguyen struct pci_dev *pdev = pf->pdev; 3082462acf6aSTony Nguyen char *opt_fw_filename = NULL; 3083462acf6aSTony Nguyen u32 dword; 3084462acf6aSTony Nguyen u8 dsn[8]; 3085462acf6aSTony Nguyen int pos; 3086462acf6aSTony Nguyen 3087462acf6aSTony Nguyen /* Determine the name of the optional file using the DSN (two 3088462acf6aSTony Nguyen * dwords following the start of the DSN Capability). 3089462acf6aSTony Nguyen */ 3090462acf6aSTony Nguyen pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 3091462acf6aSTony Nguyen if (pos) { 3092462acf6aSTony Nguyen opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); 3093462acf6aSTony Nguyen if (!opt_fw_filename) 3094462acf6aSTony Nguyen return NULL; 3095462acf6aSTony Nguyen 3096462acf6aSTony Nguyen pci_read_config_dword(pdev, pos + 4, &dword); 3097462acf6aSTony Nguyen put_unaligned_le32(dword, &dsn[0]); 3098462acf6aSTony Nguyen pci_read_config_dword(pdev, pos + 8, &dword); 3099462acf6aSTony Nguyen put_unaligned_le32(dword, &dsn[4]); 3100462acf6aSTony Nguyen snprintf(opt_fw_filename, NAME_MAX, 3101462acf6aSTony Nguyen "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg", 3102462acf6aSTony Nguyen ICE_DDP_PKG_PATH, 3103462acf6aSTony Nguyen dsn[7], dsn[6], dsn[5], dsn[4], 3104462acf6aSTony Nguyen dsn[3], dsn[2], dsn[1], dsn[0]); 3105462acf6aSTony Nguyen } 3106462acf6aSTony Nguyen 3107462acf6aSTony Nguyen return opt_fw_filename; 3108462acf6aSTony Nguyen } 3109462acf6aSTony Nguyen 3110462acf6aSTony Nguyen /** 3111462acf6aSTony Nguyen * ice_request_fw - Device initialization routine 3112462acf6aSTony Nguyen * @pf: pointer to the PF instance 3113462acf6aSTony Nguyen */ 3114462acf6aSTony Nguyen static void ice_request_fw(struct ice_pf *pf) 3115462acf6aSTony Nguyen { 3116462acf6aSTony Nguyen char *opt_fw_filename = ice_get_opt_fw_name(pf); 3117462acf6aSTony Nguyen const struct firmware *firmware = NULL; 31184015d11eSBrett Creeley 
struct device *dev = ice_pf_to_dev(pf); 3119462acf6aSTony Nguyen int err = 0; 3120462acf6aSTony Nguyen 3121462acf6aSTony Nguyen /* optional device-specific DDP (if present) overrides the default DDP 3122462acf6aSTony Nguyen * package file. kernel logs a debug message if the file doesn't exist, 3123462acf6aSTony Nguyen * and warning messages for other errors. 3124462acf6aSTony Nguyen */ 3125462acf6aSTony Nguyen if (opt_fw_filename) { 3126462acf6aSTony Nguyen err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); 3127462acf6aSTony Nguyen if (err) { 3128462acf6aSTony Nguyen kfree(opt_fw_filename); 3129462acf6aSTony Nguyen goto dflt_pkg_load; 3130462acf6aSTony Nguyen } 3131462acf6aSTony Nguyen 3132462acf6aSTony Nguyen /* request for firmware was successful. Download to device */ 3133462acf6aSTony Nguyen ice_load_pkg(firmware, pf); 3134462acf6aSTony Nguyen kfree(opt_fw_filename); 3135462acf6aSTony Nguyen release_firmware(firmware); 3136462acf6aSTony Nguyen return; 3137462acf6aSTony Nguyen } 3138462acf6aSTony Nguyen 3139462acf6aSTony Nguyen dflt_pkg_load: 3140462acf6aSTony Nguyen err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); 3141462acf6aSTony Nguyen if (err) { 314219cce2c6SAnirudh Venkataramanan dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); 3143462acf6aSTony Nguyen return; 3144462acf6aSTony Nguyen } 3145462acf6aSTony Nguyen 3146462acf6aSTony Nguyen /* request for firmware was successful. 
Download to device */ 3147462acf6aSTony Nguyen ice_load_pkg(firmware, pf); 3148462acf6aSTony Nguyen release_firmware(firmware); 3149462acf6aSTony Nguyen } 3150462acf6aSTony Nguyen 3151462acf6aSTony Nguyen /** 3152837f08fdSAnirudh Venkataramanan * ice_probe - Device initialization routine 3153837f08fdSAnirudh Venkataramanan * @pdev: PCI device information struct 3154837f08fdSAnirudh Venkataramanan * @ent: entry in ice_pci_tbl 3155837f08fdSAnirudh Venkataramanan * 3156837f08fdSAnirudh Venkataramanan * Returns 0 on success, negative on failure 3157837f08fdSAnirudh Venkataramanan */ 3158c8b7abddSBruce Allan static int 3159c8b7abddSBruce Allan ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 3160837f08fdSAnirudh Venkataramanan { 316177ed84f4SBruce Allan struct device *dev = &pdev->dev; 3162837f08fdSAnirudh Venkataramanan struct ice_pf *pf; 3163837f08fdSAnirudh Venkataramanan struct ice_hw *hw; 3164837f08fdSAnirudh Venkataramanan int err; 3165837f08fdSAnirudh Venkataramanan 31664ee656bbSTony Nguyen /* this driver uses devres, see 31674ee656bbSTony Nguyen * Documentation/driver-api/driver-model/devres.rst 31684ee656bbSTony Nguyen */ 3169837f08fdSAnirudh Venkataramanan err = pcim_enable_device(pdev); 3170837f08fdSAnirudh Venkataramanan if (err) 3171837f08fdSAnirudh Venkataramanan return err; 3172837f08fdSAnirudh Venkataramanan 3173837f08fdSAnirudh Venkataramanan err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); 3174837f08fdSAnirudh Venkataramanan if (err) { 317577ed84f4SBruce Allan dev_err(dev, "BAR0 I/O map error %d\n", err); 3176837f08fdSAnirudh Venkataramanan return err; 3177837f08fdSAnirudh Venkataramanan } 3178837f08fdSAnirudh Venkataramanan 317977ed84f4SBruce Allan pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL); 3180837f08fdSAnirudh Venkataramanan if (!pf) 3181837f08fdSAnirudh Venkataramanan return -ENOMEM; 3182837f08fdSAnirudh Venkataramanan 31832f2da36eSAnirudh Venkataramanan /* set up for high or low DMA */ 
318477ed84f4SBruce Allan err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 3185837f08fdSAnirudh Venkataramanan if (err) 318677ed84f4SBruce Allan err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 3187837f08fdSAnirudh Venkataramanan if (err) { 318877ed84f4SBruce Allan dev_err(dev, "DMA configuration failed: 0x%x\n", err); 3189837f08fdSAnirudh Venkataramanan return err; 3190837f08fdSAnirudh Venkataramanan } 3191837f08fdSAnirudh Venkataramanan 3192837f08fdSAnirudh Venkataramanan pci_enable_pcie_error_reporting(pdev); 3193837f08fdSAnirudh Venkataramanan pci_set_master(pdev); 3194837f08fdSAnirudh Venkataramanan 3195837f08fdSAnirudh Venkataramanan pf->pdev = pdev; 3196837f08fdSAnirudh Venkataramanan pci_set_drvdata(pdev, pf); 3197837f08fdSAnirudh Venkataramanan set_bit(__ICE_DOWN, pf->state); 31988d81fa55SAkeem G Abodunrin /* Disable service task until DOWN bit is cleared */ 31998d81fa55SAkeem G Abodunrin set_bit(__ICE_SERVICE_DIS, pf->state); 3200837f08fdSAnirudh Venkataramanan 3201837f08fdSAnirudh Venkataramanan hw = &pf->hw; 3202837f08fdSAnirudh Venkataramanan hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 32034e56802eSMichal Swiatkowski pci_save_state(pdev); 32044e56802eSMichal Swiatkowski 3205837f08fdSAnirudh Venkataramanan hw->back = pf; 3206837f08fdSAnirudh Venkataramanan hw->vendor_id = pdev->vendor; 3207837f08fdSAnirudh Venkataramanan hw->device_id = pdev->device; 3208837f08fdSAnirudh Venkataramanan pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 3209837f08fdSAnirudh Venkataramanan hw->subsystem_vendor_id = pdev->subsystem_vendor; 3210837f08fdSAnirudh Venkataramanan hw->subsystem_device_id = pdev->subsystem_device; 3211837f08fdSAnirudh Venkataramanan hw->bus.device = PCI_SLOT(pdev->devfn); 3212837f08fdSAnirudh Venkataramanan hw->bus.func = PCI_FUNC(pdev->devfn); 3213f31e4b6fSAnirudh Venkataramanan ice_set_ctrlq_len(hw); 3214f31e4b6fSAnirudh Venkataramanan 3215837f08fdSAnirudh Venkataramanan pf->msg_enable = netif_msg_init(debug, 
ICE_DFLT_NETIF_M); 3216837f08fdSAnirudh Venkataramanan 32177ec59eeaSAnirudh Venkataramanan #ifndef CONFIG_DYNAMIC_DEBUG 32187ec59eeaSAnirudh Venkataramanan if (debug < -1) 32197ec59eeaSAnirudh Venkataramanan hw->debug_mask = debug; 32207ec59eeaSAnirudh Venkataramanan #endif 32217ec59eeaSAnirudh Venkataramanan 3222f31e4b6fSAnirudh Venkataramanan err = ice_init_hw(hw); 3223f31e4b6fSAnirudh Venkataramanan if (err) { 322477ed84f4SBruce Allan dev_err(dev, "ice_init_hw failed: %d\n", err); 3225f31e4b6fSAnirudh Venkataramanan err = -EIO; 3226f31e4b6fSAnirudh Venkataramanan goto err_exit_unroll; 3227f31e4b6fSAnirudh Venkataramanan } 3228f31e4b6fSAnirudh Venkataramanan 3229462acf6aSTony Nguyen ice_request_fw(pf); 3230462acf6aSTony Nguyen 3231462acf6aSTony Nguyen /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be 3232462acf6aSTony Nguyen * set in pf->state, which will cause ice_is_safe_mode to return 3233462acf6aSTony Nguyen * true 3234462acf6aSTony Nguyen */ 3235462acf6aSTony Nguyen if (ice_is_safe_mode(pf)) { 323619cce2c6SAnirudh Venkataramanan dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); 3237462acf6aSTony Nguyen /* we already got function/device capabilities but these don't 3238462acf6aSTony Nguyen * reflect what the driver needs to do in safe mode. Instead of 3239462acf6aSTony Nguyen * adding conditional logic everywhere to ignore these 3240462acf6aSTony Nguyen * device/function capabilities, override them. 
3241462acf6aSTony Nguyen */ 3242462acf6aSTony Nguyen ice_set_safe_mode_caps(hw); 3243462acf6aSTony Nguyen } 3244462acf6aSTony Nguyen 324578b5713aSAnirudh Venkataramanan err = ice_init_pf(pf); 324678b5713aSAnirudh Venkataramanan if (err) { 324778b5713aSAnirudh Venkataramanan dev_err(dev, "ice_init_pf failed: %d\n", err); 324878b5713aSAnirudh Venkataramanan goto err_init_pf_unroll; 324978b5713aSAnirudh Venkataramanan } 3250940b61afSAnirudh Venkataramanan 3251995c90f2SAnirudh Venkataramanan pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 3252940b61afSAnirudh Venkataramanan if (!pf->num_alloc_vsi) { 3253940b61afSAnirudh Venkataramanan err = -EIO; 3254940b61afSAnirudh Venkataramanan goto err_init_pf_unroll; 3255940b61afSAnirudh Venkataramanan } 3256940b61afSAnirudh Venkataramanan 325777ed84f4SBruce Allan pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 325877ed84f4SBruce Allan GFP_KERNEL); 3259940b61afSAnirudh Venkataramanan if (!pf->vsi) { 3260940b61afSAnirudh Venkataramanan err = -ENOMEM; 3261940b61afSAnirudh Venkataramanan goto err_init_pf_unroll; 3262940b61afSAnirudh Venkataramanan } 3263940b61afSAnirudh Venkataramanan 3264940b61afSAnirudh Venkataramanan err = ice_init_interrupt_scheme(pf); 3265940b61afSAnirudh Venkataramanan if (err) { 326677ed84f4SBruce Allan dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 3267940b61afSAnirudh Venkataramanan err = -EIO; 3268940b61afSAnirudh Venkataramanan goto err_init_interrupt_unroll; 3269940b61afSAnirudh Venkataramanan } 3270940b61afSAnirudh Venkataramanan 32718d81fa55SAkeem G Abodunrin /* Driver is mostly up */ 32728d81fa55SAkeem G Abodunrin clear_bit(__ICE_DOWN, pf->state); 32738d81fa55SAkeem G Abodunrin 3274940b61afSAnirudh Venkataramanan /* In case of MSIX we are going to setup the misc vector right here 3275940b61afSAnirudh Venkataramanan * to handle admin queue events etc. 
In case of legacy and MSI 3276940b61afSAnirudh Venkataramanan * the misc functionality and queue processing is combined in 3277940b61afSAnirudh Venkataramanan * the same vector and that gets setup at open. 3278940b61afSAnirudh Venkataramanan */ 3279940b61afSAnirudh Venkataramanan err = ice_req_irq_msix_misc(pf); 3280940b61afSAnirudh Venkataramanan if (err) { 328177ed84f4SBruce Allan dev_err(dev, "setup of misc vector failed: %d\n", err); 3282940b61afSAnirudh Venkataramanan goto err_init_interrupt_unroll; 3283940b61afSAnirudh Venkataramanan } 3284940b61afSAnirudh Venkataramanan 3285940b61afSAnirudh Venkataramanan /* create switch struct for the switch element created by FW on boot */ 328677ed84f4SBruce Allan pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); 3287940b61afSAnirudh Venkataramanan if (!pf->first_sw) { 3288940b61afSAnirudh Venkataramanan err = -ENOMEM; 3289940b61afSAnirudh Venkataramanan goto err_msix_misc_unroll; 3290940b61afSAnirudh Venkataramanan } 3291940b61afSAnirudh Venkataramanan 3292b1edc14aSMd Fahad Iqbal Polash if (hw->evb_veb) 3293940b61afSAnirudh Venkataramanan pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 3294b1edc14aSMd Fahad Iqbal Polash else 3295b1edc14aSMd Fahad Iqbal Polash pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 3296b1edc14aSMd Fahad Iqbal Polash 3297940b61afSAnirudh Venkataramanan pf->first_sw->pf = pf; 3298940b61afSAnirudh Venkataramanan 3299940b61afSAnirudh Venkataramanan /* record the sw_id available for later use */ 3300940b61afSAnirudh Venkataramanan pf->first_sw->sw_id = hw->port_info->sw_id; 3301940b61afSAnirudh Venkataramanan 33023a858ba3SAnirudh Venkataramanan err = ice_setup_pf_sw(pf); 33033a858ba3SAnirudh Venkataramanan if (err) { 33042f2da36eSAnirudh Venkataramanan dev_err(dev, "probe failed due to setup PF switch: %d\n", err); 33053a858ba3SAnirudh Venkataramanan goto err_alloc_sw_unroll; 33063a858ba3SAnirudh Venkataramanan } 33079daf8208SAnirudh Venkataramanan 33088d81fa55SAkeem G Abodunrin 
clear_bit(__ICE_SERVICE_DIS, pf->state); 33099daf8208SAnirudh Venkataramanan 3310e3710a01SPaul M Stillwell Jr /* tell the firmware we are up */ 3311e3710a01SPaul M Stillwell Jr err = ice_send_version(pf); 3312e3710a01SPaul M Stillwell Jr if (err) { 331319cce2c6SAnirudh Venkataramanan dev_err(dev, "probe failed sending driver version %s. error: %d\n", 3314e3710a01SPaul M Stillwell Jr ice_drv_ver, err); 3315e3710a01SPaul M Stillwell Jr goto err_alloc_sw_unroll; 3316e3710a01SPaul M Stillwell Jr } 3317e3710a01SPaul M Stillwell Jr 33189daf8208SAnirudh Venkataramanan /* since everything is good, start the service timer */ 33199daf8208SAnirudh Venkataramanan mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 33209daf8208SAnirudh Venkataramanan 3321250c3b3eSBrett Creeley err = ice_init_link_events(pf->hw.port_info); 3322250c3b3eSBrett Creeley if (err) { 3323250c3b3eSBrett Creeley dev_err(dev, "ice_init_link_events failed: %d\n", err); 3324250c3b3eSBrett Creeley goto err_alloc_sw_unroll; 3325250c3b3eSBrett Creeley } 3326250c3b3eSBrett Creeley 3327c585ea42SBrett Creeley ice_verify_cacheline_size(pf); 3328c585ea42SBrett Creeley 3329462acf6aSTony Nguyen /* If no DDP driven features have to be setup, return here */ 3330462acf6aSTony Nguyen if (ice_is_safe_mode(pf)) 3331462acf6aSTony Nguyen return 0; 3332462acf6aSTony Nguyen 3333462acf6aSTony Nguyen /* initialize DDP driven features */ 3334462acf6aSTony Nguyen 3335462acf6aSTony Nguyen /* Note: DCB init failure is non-fatal to load */ 3336462acf6aSTony Nguyen if (ice_init_pf_dcb(pf, false)) { 3337462acf6aSTony Nguyen clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3338462acf6aSTony Nguyen clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 3339462acf6aSTony Nguyen } else { 3340462acf6aSTony Nguyen ice_cfg_lldp_mib_change(&pf->hw, true); 3341462acf6aSTony Nguyen } 3342462acf6aSTony Nguyen 3343e18ff118SPaul Greenwalt /* print PCI link speed and width */ 3344e18ff118SPaul Greenwalt pcie_print_link_status(pf->pdev); 
3345e18ff118SPaul Greenwalt 3346837f08fdSAnirudh Venkataramanan return 0; 3347f31e4b6fSAnirudh Venkataramanan 33483a858ba3SAnirudh Venkataramanan err_alloc_sw_unroll: 33498d81fa55SAkeem G Abodunrin set_bit(__ICE_SERVICE_DIS, pf->state); 33503a858ba3SAnirudh Venkataramanan set_bit(__ICE_DOWN, pf->state); 33514015d11eSBrett Creeley devm_kfree(dev, pf->first_sw); 3352940b61afSAnirudh Venkataramanan err_msix_misc_unroll: 3353940b61afSAnirudh Venkataramanan ice_free_irq_msix_misc(pf); 3354940b61afSAnirudh Venkataramanan err_init_interrupt_unroll: 3355940b61afSAnirudh Venkataramanan ice_clear_interrupt_scheme(pf); 335677ed84f4SBruce Allan devm_kfree(dev, pf->vsi); 3357940b61afSAnirudh Venkataramanan err_init_pf_unroll: 3358940b61afSAnirudh Venkataramanan ice_deinit_pf(pf); 3359940b61afSAnirudh Venkataramanan ice_deinit_hw(hw); 3360f31e4b6fSAnirudh Venkataramanan err_exit_unroll: 3361f31e4b6fSAnirudh Venkataramanan pci_disable_pcie_error_reporting(pdev); 3362f31e4b6fSAnirudh Venkataramanan return err; 3363837f08fdSAnirudh Venkataramanan } 3364837f08fdSAnirudh Venkataramanan 3365837f08fdSAnirudh Venkataramanan /** 3366837f08fdSAnirudh Venkataramanan * ice_remove - Device removal routine 3367837f08fdSAnirudh Venkataramanan * @pdev: PCI device information struct 3368837f08fdSAnirudh Venkataramanan */ 3369837f08fdSAnirudh Venkataramanan static void ice_remove(struct pci_dev *pdev) 3370837f08fdSAnirudh Venkataramanan { 3371837f08fdSAnirudh Venkataramanan struct ice_pf *pf = pci_get_drvdata(pdev); 337281b23589SDave Ertman int i; 3373837f08fdSAnirudh Venkataramanan 3374837f08fdSAnirudh Venkataramanan if (!pf) 3375837f08fdSAnirudh Venkataramanan return; 3376837f08fdSAnirudh Venkataramanan 3377afd9d4abSAnirudh Venkataramanan for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 3378afd9d4abSAnirudh Venkataramanan if (!ice_is_reset_in_progress(pf->state)) 3379afd9d4abSAnirudh Venkataramanan break; 3380afd9d4abSAnirudh Venkataramanan msleep(100); 3381afd9d4abSAnirudh Venkataramanan } 
3382afd9d4abSAnirudh Venkataramanan 3383837f08fdSAnirudh Venkataramanan set_bit(__ICE_DOWN, pf->state); 33848d81fa55SAkeem G Abodunrin ice_service_task_stop(pf); 3385f31e4b6fSAnirudh Venkataramanan 3386ddf30f7fSAnirudh Venkataramanan if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) 3387ddf30f7fSAnirudh Venkataramanan ice_free_vfs(pf); 33880f9d5027SAnirudh Venkataramanan ice_vsi_release_all(pf); 3389940b61afSAnirudh Venkataramanan ice_free_irq_msix_misc(pf); 339081b23589SDave Ertman ice_for_each_vsi(pf, i) { 339181b23589SDave Ertman if (!pf->vsi[i]) 339281b23589SDave Ertman continue; 339381b23589SDave Ertman ice_vsi_free_q_vectors(pf->vsi[i]); 339481b23589SDave Ertman } 3395940b61afSAnirudh Venkataramanan ice_deinit_pf(pf); 3396f31e4b6fSAnirudh Venkataramanan ice_deinit_hw(&pf->hw); 339718057cb3SBruce Allan /* Issue a PFR as part of the prescribed driver unload flow. Do not 339818057cb3SBruce Allan * do it via ice_schedule_reset() since there is no need to rebuild 339918057cb3SBruce Allan * and the service task is already stopped. 340018057cb3SBruce Allan */ 340118057cb3SBruce Allan ice_reset(&pf->hw, ICE_RESET_PFR); 3402c6012ac1SBruce Allan pci_wait_for_pending_transaction(pdev); 3403c6012ac1SBruce Allan ice_clear_interrupt_scheme(pf); 3404837f08fdSAnirudh Venkataramanan pci_disable_pcie_error_reporting(pdev); 3405837f08fdSAnirudh Venkataramanan } 3406837f08fdSAnirudh Venkataramanan 34075995b6d0SBrett Creeley /** 34085995b6d0SBrett Creeley * ice_pci_err_detected - warning that PCI error has been detected 34095995b6d0SBrett Creeley * @pdev: PCI device information struct 34105995b6d0SBrett Creeley * @err: the type of PCI error 34115995b6d0SBrett Creeley * 34125995b6d0SBrett Creeley * Called to warn that something happened on the PCI bus and the error handling 34135995b6d0SBrett Creeley * is in progress. Allows the driver to gracefully prepare/handle PCI errors. 
34145995b6d0SBrett Creeley */ 34155995b6d0SBrett Creeley static pci_ers_result_t 34165995b6d0SBrett Creeley ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err) 34175995b6d0SBrett Creeley { 34185995b6d0SBrett Creeley struct ice_pf *pf = pci_get_drvdata(pdev); 34195995b6d0SBrett Creeley 34205995b6d0SBrett Creeley if (!pf) { 34215995b6d0SBrett Creeley dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 34225995b6d0SBrett Creeley __func__, err); 34235995b6d0SBrett Creeley return PCI_ERS_RESULT_DISCONNECT; 34245995b6d0SBrett Creeley } 34255995b6d0SBrett Creeley 34265995b6d0SBrett Creeley if (!test_bit(__ICE_SUSPENDED, pf->state)) { 34275995b6d0SBrett Creeley ice_service_task_stop(pf); 34285995b6d0SBrett Creeley 34295995b6d0SBrett Creeley if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { 34305995b6d0SBrett Creeley set_bit(__ICE_PFR_REQ, pf->state); 34315995b6d0SBrett Creeley ice_prepare_for_reset(pf); 34325995b6d0SBrett Creeley } 34335995b6d0SBrett Creeley } 34345995b6d0SBrett Creeley 34355995b6d0SBrett Creeley return PCI_ERS_RESULT_NEED_RESET; 34365995b6d0SBrett Creeley } 34375995b6d0SBrett Creeley 34385995b6d0SBrett Creeley /** 34395995b6d0SBrett Creeley * ice_pci_err_slot_reset - a PCI slot reset has just happened 34405995b6d0SBrett Creeley * @pdev: PCI device information struct 34415995b6d0SBrett Creeley * 34425995b6d0SBrett Creeley * Called to determine if the driver can recover from the PCI slot reset by 34435995b6d0SBrett Creeley * using a register read to determine if the device is recoverable. 
34445995b6d0SBrett Creeley */ 34455995b6d0SBrett Creeley static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 34465995b6d0SBrett Creeley { 34475995b6d0SBrett Creeley struct ice_pf *pf = pci_get_drvdata(pdev); 34485995b6d0SBrett Creeley pci_ers_result_t result; 34495995b6d0SBrett Creeley int err; 34505995b6d0SBrett Creeley u32 reg; 34515995b6d0SBrett Creeley 34525995b6d0SBrett Creeley err = pci_enable_device_mem(pdev); 34535995b6d0SBrett Creeley if (err) { 345419cce2c6SAnirudh Venkataramanan dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 34555995b6d0SBrett Creeley err); 34565995b6d0SBrett Creeley result = PCI_ERS_RESULT_DISCONNECT; 34575995b6d0SBrett Creeley } else { 34585995b6d0SBrett Creeley pci_set_master(pdev); 34595995b6d0SBrett Creeley pci_restore_state(pdev); 34605995b6d0SBrett Creeley pci_save_state(pdev); 34615995b6d0SBrett Creeley pci_wake_from_d3(pdev, false); 34625995b6d0SBrett Creeley 34635995b6d0SBrett Creeley /* Check for life */ 34645995b6d0SBrett Creeley reg = rd32(&pf->hw, GLGEN_RTRIG); 34655995b6d0SBrett Creeley if (!reg) 34665995b6d0SBrett Creeley result = PCI_ERS_RESULT_RECOVERED; 34675995b6d0SBrett Creeley else 34685995b6d0SBrett Creeley result = PCI_ERS_RESULT_DISCONNECT; 34695995b6d0SBrett Creeley } 34705995b6d0SBrett Creeley 34715995b6d0SBrett Creeley err = pci_cleanup_aer_uncorrect_error_status(pdev); 34725995b6d0SBrett Creeley if (err) 347319cce2c6SAnirudh Venkataramanan dev_dbg(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed, error %d\n", 34745995b6d0SBrett Creeley err); 34755995b6d0SBrett Creeley /* non-fatal, continue */ 34765995b6d0SBrett Creeley 34775995b6d0SBrett Creeley return result; 34785995b6d0SBrett Creeley } 34795995b6d0SBrett Creeley 34805995b6d0SBrett Creeley /** 34815995b6d0SBrett Creeley * ice_pci_err_resume - restart operations after PCI error recovery 34825995b6d0SBrett Creeley * @pdev: PCI device information struct 34835995b6d0SBrett Creeley * 34845995b6d0SBrett 
Creeley * Called to allow the driver to bring things back up after PCI error and/or 34855995b6d0SBrett Creeley * reset recovery have finished 34865995b6d0SBrett Creeley */ 34875995b6d0SBrett Creeley static void ice_pci_err_resume(struct pci_dev *pdev) 34885995b6d0SBrett Creeley { 34895995b6d0SBrett Creeley struct ice_pf *pf = pci_get_drvdata(pdev); 34905995b6d0SBrett Creeley 34915995b6d0SBrett Creeley if (!pf) { 349219cce2c6SAnirudh Venkataramanan dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 349319cce2c6SAnirudh Venkataramanan __func__); 34945995b6d0SBrett Creeley return; 34955995b6d0SBrett Creeley } 34965995b6d0SBrett Creeley 34975995b6d0SBrett Creeley if (test_bit(__ICE_SUSPENDED, pf->state)) { 34985995b6d0SBrett Creeley dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 34995995b6d0SBrett Creeley __func__); 35005995b6d0SBrett Creeley return; 35015995b6d0SBrett Creeley } 35025995b6d0SBrett Creeley 35035995b6d0SBrett Creeley ice_do_reset(pf, ICE_RESET_PFR); 35045995b6d0SBrett Creeley ice_service_task_restart(pf); 35055995b6d0SBrett Creeley mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 35065995b6d0SBrett Creeley } 35075995b6d0SBrett Creeley 35085995b6d0SBrett Creeley /** 35095995b6d0SBrett Creeley * ice_pci_err_reset_prepare - prepare device driver for PCI reset 35105995b6d0SBrett Creeley * @pdev: PCI device information struct 35115995b6d0SBrett Creeley */ 35125995b6d0SBrett Creeley static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 35135995b6d0SBrett Creeley { 35145995b6d0SBrett Creeley struct ice_pf *pf = pci_get_drvdata(pdev); 35155995b6d0SBrett Creeley 35165995b6d0SBrett Creeley if (!test_bit(__ICE_SUSPENDED, pf->state)) { 35175995b6d0SBrett Creeley ice_service_task_stop(pf); 35185995b6d0SBrett Creeley 35195995b6d0SBrett Creeley if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { 35205995b6d0SBrett Creeley set_bit(__ICE_PFR_REQ, pf->state); 35215995b6d0SBrett Creeley ice_prepare_for_reset(pf); 
35225995b6d0SBrett Creeley } 35235995b6d0SBrett Creeley } 35245995b6d0SBrett Creeley } 35255995b6d0SBrett Creeley 35265995b6d0SBrett Creeley /** 35275995b6d0SBrett Creeley * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 35285995b6d0SBrett Creeley * @pdev: PCI device information struct 35295995b6d0SBrett Creeley */ 35305995b6d0SBrett Creeley static void ice_pci_err_reset_done(struct pci_dev *pdev) 35315995b6d0SBrett Creeley { 35325995b6d0SBrett Creeley ice_pci_err_resume(pdev); 35335995b6d0SBrett Creeley } 35345995b6d0SBrett Creeley 3535837f08fdSAnirudh Venkataramanan /* ice_pci_tbl - PCI Device ID Table 3536837f08fdSAnirudh Venkataramanan * 3537837f08fdSAnirudh Venkataramanan * Wildcard entries (PCI_ANY_ID) should come last 3538837f08fdSAnirudh Venkataramanan * Last entry must be all 0s 3539837f08fdSAnirudh Venkataramanan * 3540837f08fdSAnirudh Venkataramanan * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 3541837f08fdSAnirudh Venkataramanan * Class, Class Mask, private data (not used) } 3542837f08fdSAnirudh Venkataramanan */ 3543837f08fdSAnirudh Venkataramanan static const struct pci_device_id ice_pci_tbl[] = { 3544633d7449SAnirudh Venkataramanan { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 3545633d7449SAnirudh Venkataramanan { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 3546633d7449SAnirudh Venkataramanan { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 35475d9e618cSJacob Keller { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, 35485d9e618cSJacob Keller { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, 35495d9e618cSJacob Keller { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, 35505d9e618cSJacob Keller { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, 35515d9e618cSJacob Keller { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, 35525d9e618cSJacob Keller { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 }, 35535d9e618cSJacob Keller { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, 35545d9e618cSJacob Keller { 
PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, 35555d9e618cSJacob Keller { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, 3556837f08fdSAnirudh Venkataramanan /* required last entry */ 3557837f08fdSAnirudh Venkataramanan { 0, } 3558837f08fdSAnirudh Venkataramanan }; 3559837f08fdSAnirudh Venkataramanan MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 3560837f08fdSAnirudh Venkataramanan 35615995b6d0SBrett Creeley static const struct pci_error_handlers ice_pci_err_handler = { 35625995b6d0SBrett Creeley .error_detected = ice_pci_err_detected, 35635995b6d0SBrett Creeley .slot_reset = ice_pci_err_slot_reset, 35645995b6d0SBrett Creeley .reset_prepare = ice_pci_err_reset_prepare, 35655995b6d0SBrett Creeley .reset_done = ice_pci_err_reset_done, 35665995b6d0SBrett Creeley .resume = ice_pci_err_resume 35675995b6d0SBrett Creeley }; 35685995b6d0SBrett Creeley 3569837f08fdSAnirudh Venkataramanan static struct pci_driver ice_driver = { 3570837f08fdSAnirudh Venkataramanan .name = KBUILD_MODNAME, 3571837f08fdSAnirudh Venkataramanan .id_table = ice_pci_tbl, 3572837f08fdSAnirudh Venkataramanan .probe = ice_probe, 3573837f08fdSAnirudh Venkataramanan .remove = ice_remove, 3574ddf30f7fSAnirudh Venkataramanan .sriov_configure = ice_sriov_configure, 35755995b6d0SBrett Creeley .err_handler = &ice_pci_err_handler 3576837f08fdSAnirudh Venkataramanan }; 3577837f08fdSAnirudh Venkataramanan 3578837f08fdSAnirudh Venkataramanan /** 3579837f08fdSAnirudh Venkataramanan * ice_module_init - Driver registration routine 3580837f08fdSAnirudh Venkataramanan * 3581837f08fdSAnirudh Venkataramanan * ice_module_init is the first routine called when the driver is 3582837f08fdSAnirudh Venkataramanan * loaded. All it does is register with the PCI subsystem. 
3583837f08fdSAnirudh Venkataramanan */ 3584837f08fdSAnirudh Venkataramanan static int __init ice_module_init(void) 3585837f08fdSAnirudh Venkataramanan { 3586837f08fdSAnirudh Venkataramanan int status; 3587837f08fdSAnirudh Venkataramanan 3588837f08fdSAnirudh Venkataramanan pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); 3589837f08fdSAnirudh Venkataramanan pr_info("%s\n", ice_copyright); 3590837f08fdSAnirudh Venkataramanan 35910f9d5027SAnirudh Venkataramanan ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 3592940b61afSAnirudh Venkataramanan if (!ice_wq) { 3593940b61afSAnirudh Venkataramanan pr_err("Failed to create workqueue\n"); 3594940b61afSAnirudh Venkataramanan return -ENOMEM; 3595940b61afSAnirudh Venkataramanan } 3596940b61afSAnirudh Venkataramanan 3597837f08fdSAnirudh Venkataramanan status = pci_register_driver(&ice_driver); 3598940b61afSAnirudh Venkataramanan if (status) { 35992f2da36eSAnirudh Venkataramanan pr_err("failed to register PCI driver, err %d\n", status); 3600940b61afSAnirudh Venkataramanan destroy_workqueue(ice_wq); 3601940b61afSAnirudh Venkataramanan } 3602837f08fdSAnirudh Venkataramanan 3603837f08fdSAnirudh Venkataramanan return status; 3604837f08fdSAnirudh Venkataramanan } 3605837f08fdSAnirudh Venkataramanan module_init(ice_module_init); 3606837f08fdSAnirudh Venkataramanan 3607837f08fdSAnirudh Venkataramanan /** 3608837f08fdSAnirudh Venkataramanan * ice_module_exit - Driver exit cleanup routine 3609837f08fdSAnirudh Venkataramanan * 3610837f08fdSAnirudh Venkataramanan * ice_module_exit is called just before the driver is removed 3611837f08fdSAnirudh Venkataramanan * from memory. 
3612837f08fdSAnirudh Venkataramanan */ 3613837f08fdSAnirudh Venkataramanan static void __exit ice_module_exit(void) 3614837f08fdSAnirudh Venkataramanan { 3615837f08fdSAnirudh Venkataramanan pci_unregister_driver(&ice_driver); 3616940b61afSAnirudh Venkataramanan destroy_workqueue(ice_wq); 3617837f08fdSAnirudh Venkataramanan pr_info("module unloaded\n"); 3618837f08fdSAnirudh Venkataramanan } 3619837f08fdSAnirudh Venkataramanan module_exit(ice_module_exit); 36203a858ba3SAnirudh Venkataramanan 36213a858ba3SAnirudh Venkataramanan /** 3622f9867df6SAnirudh Venkataramanan * ice_set_mac_address - NDO callback to set MAC address 3623e94d4478SAnirudh Venkataramanan * @netdev: network interface device structure 3624e94d4478SAnirudh Venkataramanan * @pi: pointer to an address structure 3625e94d4478SAnirudh Venkataramanan * 3626e94d4478SAnirudh Venkataramanan * Returns 0 on success, negative on failure 3627e94d4478SAnirudh Venkataramanan */ 3628e94d4478SAnirudh Venkataramanan static int ice_set_mac_address(struct net_device *netdev, void *pi) 3629e94d4478SAnirudh Venkataramanan { 3630e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 3631e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 3632e94d4478SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 3633e94d4478SAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 3634e94d4478SAnirudh Venkataramanan struct sockaddr *addr = pi; 3635e94d4478SAnirudh Venkataramanan enum ice_status status; 3636e94d4478SAnirudh Venkataramanan u8 flags = 0; 3637bbb968e8SAkeem G Abodunrin int err = 0; 3638e94d4478SAnirudh Venkataramanan u8 *mac; 3639e94d4478SAnirudh Venkataramanan 3640e94d4478SAnirudh Venkataramanan mac = (u8 *)addr->sa_data; 3641e94d4478SAnirudh Venkataramanan 3642e94d4478SAnirudh Venkataramanan if (!is_valid_ether_addr(mac)) 3643e94d4478SAnirudh Venkataramanan return -EADDRNOTAVAIL; 3644e94d4478SAnirudh Venkataramanan 3645e94d4478SAnirudh Venkataramanan if 
(ether_addr_equal(netdev->dev_addr, mac)) { 3646e94d4478SAnirudh Venkataramanan netdev_warn(netdev, "already using mac %pM\n", mac); 3647e94d4478SAnirudh Venkataramanan return 0; 3648e94d4478SAnirudh Venkataramanan } 3649e94d4478SAnirudh Venkataramanan 3650e94d4478SAnirudh Venkataramanan if (test_bit(__ICE_DOWN, pf->state) || 36515df7e45dSDave Ertman ice_is_reset_in_progress(pf->state)) { 3652e94d4478SAnirudh Venkataramanan netdev_err(netdev, "can't set mac %pM. device not ready\n", 3653e94d4478SAnirudh Venkataramanan mac); 3654e94d4478SAnirudh Venkataramanan return -EBUSY; 3655e94d4478SAnirudh Venkataramanan } 3656e94d4478SAnirudh Venkataramanan 3657f9867df6SAnirudh Venkataramanan /* When we change the MAC address we also have to change the MAC address 3658f9867df6SAnirudh Venkataramanan * based filter rules that were created previously for the old MAC 3659e94d4478SAnirudh Venkataramanan * address. So first, we remove the old filter rule using ice_remove_mac 3660bbb968e8SAkeem G Abodunrin * and then create a new filter rule using ice_add_mac via 3661bbb968e8SAkeem G Abodunrin * ice_vsi_cfg_mac_fltr function call for both add and/or remove 3662bbb968e8SAkeem G Abodunrin * filters. 
3663e94d4478SAnirudh Venkataramanan */ 3664bbb968e8SAkeem G Abodunrin status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false); 3665e94d4478SAnirudh Venkataramanan if (status) { 3666e94d4478SAnirudh Venkataramanan err = -EADDRNOTAVAIL; 3667bbb968e8SAkeem G Abodunrin goto err_update_filters; 3668e94d4478SAnirudh Venkataramanan } 3669e94d4478SAnirudh Venkataramanan 3670bbb968e8SAkeem G Abodunrin status = ice_vsi_cfg_mac_fltr(vsi, mac, true); 3671e94d4478SAnirudh Venkataramanan if (status) { 3672e94d4478SAnirudh Venkataramanan err = -EADDRNOTAVAIL; 3673bbb968e8SAkeem G Abodunrin goto err_update_filters; 3674e94d4478SAnirudh Venkataramanan } 3675e94d4478SAnirudh Venkataramanan 3676bbb968e8SAkeem G Abodunrin err_update_filters: 3677e94d4478SAnirudh Venkataramanan if (err) { 36782f2da36eSAnirudh Venkataramanan netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 3679e94d4478SAnirudh Venkataramanan mac); 3680e94d4478SAnirudh Venkataramanan return err; 3681e94d4478SAnirudh Venkataramanan } 3682e94d4478SAnirudh Venkataramanan 3683f9867df6SAnirudh Venkataramanan /* change the netdev's MAC address */ 3684e94d4478SAnirudh Venkataramanan memcpy(netdev->dev_addr, mac, netdev->addr_len); 36852f2da36eSAnirudh Venkataramanan netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 3686e94d4478SAnirudh Venkataramanan netdev->dev_addr); 3687e94d4478SAnirudh Venkataramanan 3688f9867df6SAnirudh Venkataramanan /* write new MAC address to the firmware */ 3689e94d4478SAnirudh Venkataramanan flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 3690e94d4478SAnirudh Venkataramanan status = ice_aq_manage_mac_write(hw, mac, flags, NULL); 3691e94d4478SAnirudh Venkataramanan if (status) { 3692bbb968e8SAkeem G Abodunrin netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %d\n", 3693bbb968e8SAkeem G Abodunrin mac, status); 3694e94d4478SAnirudh Venkataramanan } 3695e94d4478SAnirudh Venkataramanan return 0; 3696e94d4478SAnirudh Venkataramanan } 3697e94d4478SAnirudh Venkataramanan 3698e94d4478SAnirudh Venkataramanan /** 3699e94d4478SAnirudh Venkataramanan * ice_set_rx_mode - NDO callback to set the netdev filters 3700e94d4478SAnirudh Venkataramanan * @netdev: network interface device structure 3701e94d4478SAnirudh Venkataramanan */ 3702e94d4478SAnirudh Venkataramanan static void ice_set_rx_mode(struct net_device *netdev) 3703e94d4478SAnirudh Venkataramanan { 3704e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 3705e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 3706e94d4478SAnirudh Venkataramanan 3707e94d4478SAnirudh Venkataramanan if (!vsi) 3708e94d4478SAnirudh Venkataramanan return; 3709e94d4478SAnirudh Venkataramanan 3710e94d4478SAnirudh Venkataramanan /* Set the flags to synchronize filters 3711e94d4478SAnirudh Venkataramanan * ndo_set_rx_mode may be triggered even without a change in netdev 3712e94d4478SAnirudh Venkataramanan * flags 3713e94d4478SAnirudh Venkataramanan */ 3714e94d4478SAnirudh Venkataramanan set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 3715e94d4478SAnirudh Venkataramanan set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 3716e94d4478SAnirudh Venkataramanan set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 3717e94d4478SAnirudh Venkataramanan 3718e94d4478SAnirudh Venkataramanan /* schedule our worker thread which will take care of 3719e94d4478SAnirudh Venkataramanan * applying the new filter changes 3720e94d4478SAnirudh Venkataramanan */ 3721e94d4478SAnirudh Venkataramanan ice_service_task_schedule(vsi->back); 3722e94d4478SAnirudh Venkataramanan } 3723e94d4478SAnirudh Venkataramanan 3724e94d4478SAnirudh Venkataramanan /** 37251ddef455SUsha Ketineni * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 
37261ddef455SUsha Ketineni * @netdev: network interface device structure 37271ddef455SUsha Ketineni * @queue_index: Queue ID 37281ddef455SUsha Ketineni * @maxrate: maximum bandwidth in Mbps 37291ddef455SUsha Ketineni */ 37301ddef455SUsha Ketineni static int 37311ddef455SUsha Ketineni ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 37321ddef455SUsha Ketineni { 37331ddef455SUsha Ketineni struct ice_netdev_priv *np = netdev_priv(netdev); 37341ddef455SUsha Ketineni struct ice_vsi *vsi = np->vsi; 37351ddef455SUsha Ketineni enum ice_status status; 37361ddef455SUsha Ketineni u16 q_handle; 37371ddef455SUsha Ketineni u8 tc; 37381ddef455SUsha Ketineni 37391ddef455SUsha Ketineni /* Validate maxrate requested is within permitted range */ 37401ddef455SUsha Ketineni if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 374119cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 37421ddef455SUsha Ketineni maxrate, queue_index); 37431ddef455SUsha Ketineni return -EINVAL; 37441ddef455SUsha Ketineni } 37451ddef455SUsha Ketineni 37461ddef455SUsha Ketineni q_handle = vsi->tx_rings[queue_index]->q_handle; 37471ddef455SUsha Ketineni tc = ice_dcb_get_tc(vsi, queue_index); 37481ddef455SUsha Ketineni 37491ddef455SUsha Ketineni /* Set BW back to default, when user set maxrate to 0 */ 37501ddef455SUsha Ketineni if (!maxrate) 37511ddef455SUsha Ketineni status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 37521ddef455SUsha Ketineni q_handle, ICE_MAX_BW); 37531ddef455SUsha Ketineni else 37541ddef455SUsha Ketineni status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 37551ddef455SUsha Ketineni q_handle, ICE_MAX_BW, maxrate * 1000); 37561ddef455SUsha Ketineni if (status) { 375719cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Unable to set Tx max rate, error %d\n", 375819cce2c6SAnirudh Venkataramanan status); 37591ddef455SUsha Ketineni return -EIO; 37601ddef455SUsha Ketineni } 37611ddef455SUsha Ketineni 
37621ddef455SUsha Ketineni return 0; 37631ddef455SUsha Ketineni } 37641ddef455SUsha Ketineni 37651ddef455SUsha Ketineni /** 3766e94d4478SAnirudh Venkataramanan * ice_fdb_add - add an entry to the hardware database 3767e94d4478SAnirudh Venkataramanan * @ndm: the input from the stack 3768e94d4478SAnirudh Venkataramanan * @tb: pointer to array of nladdr (unused) 3769e94d4478SAnirudh Venkataramanan * @dev: the net device pointer 3770e94d4478SAnirudh Venkataramanan * @addr: the MAC address entry being added 3771f9867df6SAnirudh Venkataramanan * @vid: VLAN ID 3772e94d4478SAnirudh Venkataramanan * @flags: instructions from stack about fdb operation 377399be37edSBruce Allan * @extack: netlink extended ack 3774e94d4478SAnirudh Venkataramanan */ 377599be37edSBruce Allan static int 377699be37edSBruce Allan ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 377799be37edSBruce Allan struct net_device *dev, const unsigned char *addr, u16 vid, 377899be37edSBruce Allan u16 flags, struct netlink_ext_ack __always_unused *extack) 3779e94d4478SAnirudh Venkataramanan { 3780e94d4478SAnirudh Venkataramanan int err; 3781e94d4478SAnirudh Venkataramanan 3782e94d4478SAnirudh Venkataramanan if (vid) { 3783e94d4478SAnirudh Venkataramanan netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 3784e94d4478SAnirudh Venkataramanan return -EINVAL; 3785e94d4478SAnirudh Venkataramanan } 3786e94d4478SAnirudh Venkataramanan if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 3787e94d4478SAnirudh Venkataramanan netdev_err(dev, "FDB only supports static addresses\n"); 3788e94d4478SAnirudh Venkataramanan return -EINVAL; 3789e94d4478SAnirudh Venkataramanan } 3790e94d4478SAnirudh Venkataramanan 3791e94d4478SAnirudh Venkataramanan if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 3792e94d4478SAnirudh Venkataramanan err = dev_uc_add_excl(dev, addr); 3793e94d4478SAnirudh Venkataramanan else if (is_multicast_ether_addr(addr)) 3794e94d4478SAnirudh 
Venkataramanan err = dev_mc_add_excl(dev, addr); 3795e94d4478SAnirudh Venkataramanan else 3796e94d4478SAnirudh Venkataramanan err = -EINVAL; 3797e94d4478SAnirudh Venkataramanan 3798e94d4478SAnirudh Venkataramanan /* Only return duplicate errors if NLM_F_EXCL is set */ 3799e94d4478SAnirudh Venkataramanan if (err == -EEXIST && !(flags & NLM_F_EXCL)) 3800e94d4478SAnirudh Venkataramanan err = 0; 3801e94d4478SAnirudh Venkataramanan 3802e94d4478SAnirudh Venkataramanan return err; 3803e94d4478SAnirudh Venkataramanan } 3804e94d4478SAnirudh Venkataramanan 3805e94d4478SAnirudh Venkataramanan /** 3806e94d4478SAnirudh Venkataramanan * ice_fdb_del - delete an entry from the hardware database 3807e94d4478SAnirudh Venkataramanan * @ndm: the input from the stack 3808e94d4478SAnirudh Venkataramanan * @tb: pointer to array of nladdr (unused) 3809e94d4478SAnirudh Venkataramanan * @dev: the net device pointer 3810e94d4478SAnirudh Venkataramanan * @addr: the MAC address entry being added 3811f9867df6SAnirudh Venkataramanan * @vid: VLAN ID 3812e94d4478SAnirudh Venkataramanan */ 3813c8b7abddSBruce Allan static int 3814c8b7abddSBruce Allan ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 3815e94d4478SAnirudh Venkataramanan struct net_device *dev, const unsigned char *addr, 3816e94d4478SAnirudh Venkataramanan __always_unused u16 vid) 3817e94d4478SAnirudh Venkataramanan { 3818e94d4478SAnirudh Venkataramanan int err; 3819e94d4478SAnirudh Venkataramanan 3820e94d4478SAnirudh Venkataramanan if (ndm->ndm_state & NUD_PERMANENT) { 3821e94d4478SAnirudh Venkataramanan netdev_err(dev, "FDB only supports static addresses\n"); 3822e94d4478SAnirudh Venkataramanan return -EINVAL; 3823e94d4478SAnirudh Venkataramanan } 3824e94d4478SAnirudh Venkataramanan 3825e94d4478SAnirudh Venkataramanan if (is_unicast_ether_addr(addr)) 3826e94d4478SAnirudh Venkataramanan err = dev_uc_del(dev, addr); 3827e94d4478SAnirudh Venkataramanan else if (is_multicast_ether_addr(addr)) 3828e94d4478SAnirudh 
Venkataramanan err = dev_mc_del(dev, addr); 3829e94d4478SAnirudh Venkataramanan else 3830e94d4478SAnirudh Venkataramanan err = -EINVAL; 3831e94d4478SAnirudh Venkataramanan 3832e94d4478SAnirudh Venkataramanan return err; 3833e94d4478SAnirudh Venkataramanan } 3834e94d4478SAnirudh Venkataramanan 3835e94d4478SAnirudh Venkataramanan /** 3836d76a60baSAnirudh Venkataramanan * ice_set_features - set the netdev feature flags 3837d76a60baSAnirudh Venkataramanan * @netdev: ptr to the netdev being adjusted 3838d76a60baSAnirudh Venkataramanan * @features: the feature set that the stack is suggesting 3839d76a60baSAnirudh Venkataramanan */ 3840c8b7abddSBruce Allan static int 3841c8b7abddSBruce Allan ice_set_features(struct net_device *netdev, netdev_features_t features) 3842d76a60baSAnirudh Venkataramanan { 3843d76a60baSAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 3844d76a60baSAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 38455f8cc355SHenry Tieman struct ice_pf *pf = vsi->back; 3846d76a60baSAnirudh Venkataramanan int ret = 0; 3847d76a60baSAnirudh Venkataramanan 3848462acf6aSTony Nguyen /* Don't set any netdev advanced features with device in Safe Mode */ 3849462acf6aSTony Nguyen if (ice_is_safe_mode(vsi->back)) { 385019cce2c6SAnirudh Venkataramanan dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); 3851462acf6aSTony Nguyen return ret; 3852462acf6aSTony Nguyen } 3853462acf6aSTony Nguyen 38545f8cc355SHenry Tieman /* Do not change setting during reset */ 38555f8cc355SHenry Tieman if (ice_is_reset_in_progress(pf->state)) { 385619cce2c6SAnirudh Venkataramanan dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 38575f8cc355SHenry Tieman return -EBUSY; 38585f8cc355SHenry Tieman } 38595f8cc355SHenry Tieman 38608f529ff9STony Nguyen /* Multiple features can be changed in one call so keep features in 38618f529ff9STony Nguyen * separate 
if/else statements to guarantee each feature is checked 38628f529ff9STony Nguyen */ 3863492af0abSMd Fahad Iqbal Polash if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 3864492af0abSMd Fahad Iqbal Polash ret = ice_vsi_manage_rss_lut(vsi, true); 3865492af0abSMd Fahad Iqbal Polash else if (!(features & NETIF_F_RXHASH) && 3866492af0abSMd Fahad Iqbal Polash netdev->features & NETIF_F_RXHASH) 3867492af0abSMd Fahad Iqbal Polash ret = ice_vsi_manage_rss_lut(vsi, false); 3868492af0abSMd Fahad Iqbal Polash 3869d76a60baSAnirudh Venkataramanan if ((features & NETIF_F_HW_VLAN_CTAG_RX) && 3870d76a60baSAnirudh Venkataramanan !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 3871d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_stripping(vsi, true); 3872d76a60baSAnirudh Venkataramanan else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && 3873d76a60baSAnirudh Venkataramanan (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 3874d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_stripping(vsi, false); 38758f529ff9STony Nguyen 38768f529ff9STony Nguyen if ((features & NETIF_F_HW_VLAN_CTAG_TX) && 3877d76a60baSAnirudh Venkataramanan !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 3878d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_insertion(vsi); 3879d76a60baSAnirudh Venkataramanan else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && 3880d76a60baSAnirudh Venkataramanan (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 3881d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_insertion(vsi); 3882d76a60baSAnirudh Venkataramanan 38833171948eSTony Nguyen if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 38843171948eSTony Nguyen !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 38853171948eSTony Nguyen ret = ice_cfg_vlan_pruning(vsi, true, false); 38863171948eSTony Nguyen else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 38873171948eSTony Nguyen (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 38883171948eSTony Nguyen ret = ice_cfg_vlan_pruning(vsi, false, 
false); 38893171948eSTony Nguyen 3890d76a60baSAnirudh Venkataramanan return ret; 3891d76a60baSAnirudh Venkataramanan } 3892d76a60baSAnirudh Venkataramanan 3893d76a60baSAnirudh Venkataramanan /** 3894f9867df6SAnirudh Venkataramanan * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI 3895f9867df6SAnirudh Venkataramanan * @vsi: VSI to setup VLAN properties for 3896d76a60baSAnirudh Venkataramanan */ 3897d76a60baSAnirudh Venkataramanan static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 3898d76a60baSAnirudh Venkataramanan { 3899d76a60baSAnirudh Venkataramanan int ret = 0; 3900d76a60baSAnirudh Venkataramanan 3901d76a60baSAnirudh Venkataramanan if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 3902d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_stripping(vsi, true); 3903d76a60baSAnirudh Venkataramanan if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 3904d76a60baSAnirudh Venkataramanan ret = ice_vsi_manage_vlan_insertion(vsi); 3905d76a60baSAnirudh Venkataramanan 3906d76a60baSAnirudh Venkataramanan return ret; 3907d76a60baSAnirudh Venkataramanan } 3908d76a60baSAnirudh Venkataramanan 3909d76a60baSAnirudh Venkataramanan /** 3910cdedef59SAnirudh Venkataramanan * ice_vsi_cfg - Setup the VSI 3911cdedef59SAnirudh Venkataramanan * @vsi: the VSI being configured 3912cdedef59SAnirudh Venkataramanan * 3913cdedef59SAnirudh Venkataramanan * Return 0 on success and negative value on error 3914cdedef59SAnirudh Venkataramanan */ 39150e674aebSAnirudh Venkataramanan int ice_vsi_cfg(struct ice_vsi *vsi) 3916cdedef59SAnirudh Venkataramanan { 3917cdedef59SAnirudh Venkataramanan int err; 3918cdedef59SAnirudh Venkataramanan 3919c7f2c42bSAnirudh Venkataramanan if (vsi->netdev) { 3920e94d4478SAnirudh Venkataramanan ice_set_rx_mode(vsi->netdev); 39219ecd25c2SAnirudh Venkataramanan 39229ecd25c2SAnirudh Venkataramanan err = ice_vsi_vlan_setup(vsi); 39239ecd25c2SAnirudh Venkataramanan 3924d76a60baSAnirudh Venkataramanan if (err) 3925d76a60baSAnirudh Venkataramanan return err; 
3926c7f2c42bSAnirudh Venkataramanan } 3927a629cf0aSAnirudh Venkataramanan ice_vsi_cfg_dcb_rings(vsi); 392803f7a986SAnirudh Venkataramanan 392903f7a986SAnirudh Venkataramanan err = ice_vsi_cfg_lan_txqs(vsi); 3930efc2214bSMaciej Fijalkowski if (!err && ice_is_xdp_ena_vsi(vsi)) 3931efc2214bSMaciej Fijalkowski err = ice_vsi_cfg_xdp_txqs(vsi); 3932cdedef59SAnirudh Venkataramanan if (!err) 3933cdedef59SAnirudh Venkataramanan err = ice_vsi_cfg_rxqs(vsi); 3934cdedef59SAnirudh Venkataramanan 3935cdedef59SAnirudh Venkataramanan return err; 3936cdedef59SAnirudh Venkataramanan } 3937cdedef59SAnirudh Venkataramanan 3938cdedef59SAnirudh Venkataramanan /** 39392b245cb2SAnirudh Venkataramanan * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 39402b245cb2SAnirudh Venkataramanan * @vsi: the VSI being configured 39412b245cb2SAnirudh Venkataramanan */ 39422b245cb2SAnirudh Venkataramanan static void ice_napi_enable_all(struct ice_vsi *vsi) 39432b245cb2SAnirudh Venkataramanan { 39442b245cb2SAnirudh Venkataramanan int q_idx; 39452b245cb2SAnirudh Venkataramanan 39462b245cb2SAnirudh Venkataramanan if (!vsi->netdev) 39472b245cb2SAnirudh Venkataramanan return; 39482b245cb2SAnirudh Venkataramanan 39490c2561c8SBrett Creeley ice_for_each_q_vector(vsi, q_idx) { 3950eec90376SYoung Xiao struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 3951eec90376SYoung Xiao 3952eec90376SYoung Xiao if (q_vector->rx.ring || q_vector->tx.ring) 3953eec90376SYoung Xiao napi_enable(&q_vector->napi); 3954eec90376SYoung Xiao } 39552b245cb2SAnirudh Venkataramanan } 39562b245cb2SAnirudh Venkataramanan 39572b245cb2SAnirudh Venkataramanan /** 3958cdedef59SAnirudh Venkataramanan * ice_up_complete - Finish the last steps of bringing up a connection 3959cdedef59SAnirudh Venkataramanan * @vsi: The VSI being configured 3960cdedef59SAnirudh Venkataramanan * 3961cdedef59SAnirudh Venkataramanan * Return 0 on success and negative value on error 3962cdedef59SAnirudh Venkataramanan */ 3963cdedef59SAnirudh 
Venkataramanan static int ice_up_complete(struct ice_vsi *vsi) 3964cdedef59SAnirudh Venkataramanan { 3965cdedef59SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 3966cdedef59SAnirudh Venkataramanan int err; 3967cdedef59SAnirudh Venkataramanan 3968cdedef59SAnirudh Venkataramanan ice_vsi_cfg_msix(vsi); 3969cdedef59SAnirudh Venkataramanan 3970cdedef59SAnirudh Venkataramanan /* Enable only Rx rings, Tx rings were enabled by the FW when the 3971cdedef59SAnirudh Venkataramanan * Tx queue group list was configured and the context bits were 3972cdedef59SAnirudh Venkataramanan * programmed using ice_vsi_cfg_txqs 3973cdedef59SAnirudh Venkataramanan */ 397413a6233bSBrett Creeley err = ice_vsi_start_all_rx_rings(vsi); 3975cdedef59SAnirudh Venkataramanan if (err) 3976cdedef59SAnirudh Venkataramanan return err; 3977cdedef59SAnirudh Venkataramanan 3978cdedef59SAnirudh Venkataramanan clear_bit(__ICE_DOWN, vsi->state); 39792b245cb2SAnirudh Venkataramanan ice_napi_enable_all(vsi); 3980cdedef59SAnirudh Venkataramanan ice_vsi_ena_irq(vsi); 3981cdedef59SAnirudh Venkataramanan 3982cdedef59SAnirudh Venkataramanan if (vsi->port_info && 3983cdedef59SAnirudh Venkataramanan (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 3984cdedef59SAnirudh Venkataramanan vsi->netdev) { 3985cdedef59SAnirudh Venkataramanan ice_print_link_msg(vsi, true); 3986cdedef59SAnirudh Venkataramanan netif_tx_start_all_queues(vsi->netdev); 3987cdedef59SAnirudh Venkataramanan netif_carrier_on(vsi->netdev); 3988cdedef59SAnirudh Venkataramanan } 3989cdedef59SAnirudh Venkataramanan 3990cdedef59SAnirudh Venkataramanan ice_service_task_schedule(pf); 3991cdedef59SAnirudh Venkataramanan 39921b5c19c7SBruce Allan return 0; 3993cdedef59SAnirudh Venkataramanan } 3994cdedef59SAnirudh Venkataramanan 3995cdedef59SAnirudh Venkataramanan /** 3996fcea6f3dSAnirudh Venkataramanan * ice_up - Bring the connection back up after being down 3997fcea6f3dSAnirudh Venkataramanan * @vsi: VSI being configured 3998fcea6f3dSAnirudh 
Venkataramanan */ 3999fcea6f3dSAnirudh Venkataramanan int ice_up(struct ice_vsi *vsi) 4000fcea6f3dSAnirudh Venkataramanan { 4001fcea6f3dSAnirudh Venkataramanan int err; 4002fcea6f3dSAnirudh Venkataramanan 4003fcea6f3dSAnirudh Venkataramanan err = ice_vsi_cfg(vsi); 4004fcea6f3dSAnirudh Venkataramanan if (!err) 4005fcea6f3dSAnirudh Venkataramanan err = ice_up_complete(vsi); 4006fcea6f3dSAnirudh Venkataramanan 4007fcea6f3dSAnirudh Venkataramanan return err; 4008fcea6f3dSAnirudh Venkataramanan } 4009fcea6f3dSAnirudh Venkataramanan 4010fcea6f3dSAnirudh Venkataramanan /** 4011fcea6f3dSAnirudh Venkataramanan * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 4012fcea6f3dSAnirudh Venkataramanan * @ring: Tx or Rx ring to read stats from 4013fcea6f3dSAnirudh Venkataramanan * @pkts: packets stats counter 4014fcea6f3dSAnirudh Venkataramanan * @bytes: bytes stats counter 4015fcea6f3dSAnirudh Venkataramanan * 4016fcea6f3dSAnirudh Venkataramanan * This function fetches stats from the ring considering the atomic operations 4017fcea6f3dSAnirudh Venkataramanan * that needs to be performed to read u64 values in 32 bit machine. 
4018fcea6f3dSAnirudh Venkataramanan */ 4019c8b7abddSBruce Allan static void 4020c8b7abddSBruce Allan ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) 4021fcea6f3dSAnirudh Venkataramanan { 4022fcea6f3dSAnirudh Venkataramanan unsigned int start; 4023fcea6f3dSAnirudh Venkataramanan *pkts = 0; 4024fcea6f3dSAnirudh Venkataramanan *bytes = 0; 4025fcea6f3dSAnirudh Venkataramanan 4026fcea6f3dSAnirudh Venkataramanan if (!ring) 4027fcea6f3dSAnirudh Venkataramanan return; 4028fcea6f3dSAnirudh Venkataramanan do { 4029fcea6f3dSAnirudh Venkataramanan start = u64_stats_fetch_begin_irq(&ring->syncp); 4030fcea6f3dSAnirudh Venkataramanan *pkts = ring->stats.pkts; 4031fcea6f3dSAnirudh Venkataramanan *bytes = ring->stats.bytes; 4032fcea6f3dSAnirudh Venkataramanan } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 4033fcea6f3dSAnirudh Venkataramanan } 4034fcea6f3dSAnirudh Venkataramanan 4035fcea6f3dSAnirudh Venkataramanan /** 4036fcea6f3dSAnirudh Venkataramanan * ice_update_vsi_ring_stats - Update VSI stats counters 4037fcea6f3dSAnirudh Venkataramanan * @vsi: the VSI to be updated 4038fcea6f3dSAnirudh Venkataramanan */ 4039fcea6f3dSAnirudh Venkataramanan static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 4040fcea6f3dSAnirudh Venkataramanan { 4041fcea6f3dSAnirudh Venkataramanan struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 4042fcea6f3dSAnirudh Venkataramanan struct ice_ring *ring; 4043fcea6f3dSAnirudh Venkataramanan u64 pkts, bytes; 4044fcea6f3dSAnirudh Venkataramanan int i; 4045fcea6f3dSAnirudh Venkataramanan 4046fcea6f3dSAnirudh Venkataramanan /* reset netdev stats */ 4047fcea6f3dSAnirudh Venkataramanan vsi_stats->tx_packets = 0; 4048fcea6f3dSAnirudh Venkataramanan vsi_stats->tx_bytes = 0; 4049fcea6f3dSAnirudh Venkataramanan vsi_stats->rx_packets = 0; 4050fcea6f3dSAnirudh Venkataramanan vsi_stats->rx_bytes = 0; 4051fcea6f3dSAnirudh Venkataramanan 4052fcea6f3dSAnirudh Venkataramanan /* reset non-netdev (extended) stats */ 
4053fcea6f3dSAnirudh Venkataramanan vsi->tx_restart = 0; 4054fcea6f3dSAnirudh Venkataramanan vsi->tx_busy = 0; 4055fcea6f3dSAnirudh Venkataramanan vsi->tx_linearize = 0; 4056fcea6f3dSAnirudh Venkataramanan vsi->rx_buf_failed = 0; 4057fcea6f3dSAnirudh Venkataramanan vsi->rx_page_failed = 0; 4058fcea6f3dSAnirudh Venkataramanan 4059fcea6f3dSAnirudh Venkataramanan rcu_read_lock(); 4060fcea6f3dSAnirudh Venkataramanan 4061fcea6f3dSAnirudh Venkataramanan /* update Tx rings counters */ 4062fcea6f3dSAnirudh Venkataramanan ice_for_each_txq(vsi, i) { 4063fcea6f3dSAnirudh Venkataramanan ring = READ_ONCE(vsi->tx_rings[i]); 4064fcea6f3dSAnirudh Venkataramanan ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 4065fcea6f3dSAnirudh Venkataramanan vsi_stats->tx_packets += pkts; 4066fcea6f3dSAnirudh Venkataramanan vsi_stats->tx_bytes += bytes; 4067fcea6f3dSAnirudh Venkataramanan vsi->tx_restart += ring->tx_stats.restart_q; 4068fcea6f3dSAnirudh Venkataramanan vsi->tx_busy += ring->tx_stats.tx_busy; 4069fcea6f3dSAnirudh Venkataramanan vsi->tx_linearize += ring->tx_stats.tx_linearize; 4070fcea6f3dSAnirudh Venkataramanan } 4071fcea6f3dSAnirudh Venkataramanan 4072fcea6f3dSAnirudh Venkataramanan /* update Rx rings counters */ 4073fcea6f3dSAnirudh Venkataramanan ice_for_each_rxq(vsi, i) { 4074fcea6f3dSAnirudh Venkataramanan ring = READ_ONCE(vsi->rx_rings[i]); 4075fcea6f3dSAnirudh Venkataramanan ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 4076fcea6f3dSAnirudh Venkataramanan vsi_stats->rx_packets += pkts; 4077fcea6f3dSAnirudh Venkataramanan vsi_stats->rx_bytes += bytes; 4078fcea6f3dSAnirudh Venkataramanan vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 4079fcea6f3dSAnirudh Venkataramanan vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; 4080fcea6f3dSAnirudh Venkataramanan } 4081fcea6f3dSAnirudh Venkataramanan 4082fcea6f3dSAnirudh Venkataramanan rcu_read_unlock(); 4083fcea6f3dSAnirudh Venkataramanan } 4084fcea6f3dSAnirudh Venkataramanan 4085fcea6f3dSAnirudh Venkataramanan 
/** 4086fcea6f3dSAnirudh Venkataramanan * ice_update_vsi_stats - Update VSI stats counters 4087fcea6f3dSAnirudh Venkataramanan * @vsi: the VSI to be updated 4088fcea6f3dSAnirudh Venkataramanan */ 40895a4a8673SBruce Allan void ice_update_vsi_stats(struct ice_vsi *vsi) 4090fcea6f3dSAnirudh Venkataramanan { 4091fcea6f3dSAnirudh Venkataramanan struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 4092fcea6f3dSAnirudh Venkataramanan struct ice_eth_stats *cur_es = &vsi->eth_stats; 4093fcea6f3dSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 4094fcea6f3dSAnirudh Venkataramanan 4095fcea6f3dSAnirudh Venkataramanan if (test_bit(__ICE_DOWN, vsi->state) || 4096fcea6f3dSAnirudh Venkataramanan test_bit(__ICE_CFG_BUSY, pf->state)) 4097fcea6f3dSAnirudh Venkataramanan return; 4098fcea6f3dSAnirudh Venkataramanan 4099fcea6f3dSAnirudh Venkataramanan /* get stats as recorded by Tx/Rx rings */ 4100fcea6f3dSAnirudh Venkataramanan ice_update_vsi_ring_stats(vsi); 4101fcea6f3dSAnirudh Venkataramanan 4102fcea6f3dSAnirudh Venkataramanan /* get VSI stats as recorded by the hardware */ 4103fcea6f3dSAnirudh Venkataramanan ice_update_eth_stats(vsi); 4104fcea6f3dSAnirudh Venkataramanan 4105fcea6f3dSAnirudh Venkataramanan cur_ns->tx_errors = cur_es->tx_errors; 4106fcea6f3dSAnirudh Venkataramanan cur_ns->rx_dropped = cur_es->rx_discards; 4107fcea6f3dSAnirudh Venkataramanan cur_ns->tx_dropped = cur_es->tx_discards; 4108fcea6f3dSAnirudh Venkataramanan cur_ns->multicast = cur_es->rx_multicast; 4109fcea6f3dSAnirudh Venkataramanan 4110fcea6f3dSAnirudh Venkataramanan /* update some more netdev stats if this is main VSI */ 4111fcea6f3dSAnirudh Venkataramanan if (vsi->type == ICE_VSI_PF) { 4112fcea6f3dSAnirudh Venkataramanan cur_ns->rx_crc_errors = pf->stats.crc_errors; 4113fcea6f3dSAnirudh Venkataramanan cur_ns->rx_errors = pf->stats.crc_errors + 4114fcea6f3dSAnirudh Venkataramanan pf->stats.illegal_bytes; 4115fcea6f3dSAnirudh Venkataramanan cur_ns->rx_length_errors = pf->stats.rx_len_errors; 
411656923ab6SBrett Creeley /* record drops from the port level */ 411756923ab6SBrett Creeley cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 4118fcea6f3dSAnirudh Venkataramanan } 4119fcea6f3dSAnirudh Venkataramanan } 4120fcea6f3dSAnirudh Venkataramanan 4121fcea6f3dSAnirudh Venkataramanan /** 4122fcea6f3dSAnirudh Venkataramanan * ice_update_pf_stats - Update PF port stats counters 4123fcea6f3dSAnirudh Venkataramanan * @pf: PF whose stats needs to be updated 4124fcea6f3dSAnirudh Venkataramanan */ 41255a4a8673SBruce Allan void ice_update_pf_stats(struct ice_pf *pf) 4126fcea6f3dSAnirudh Venkataramanan { 4127fcea6f3dSAnirudh Venkataramanan struct ice_hw_port_stats *prev_ps, *cur_ps; 4128fcea6f3dSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 41299e7a5d17SUsha Ketineni u8 port; 4130fcea6f3dSAnirudh Venkataramanan 41319e7a5d17SUsha Ketineni port = hw->port_info->lport; 4132fcea6f3dSAnirudh Venkataramanan prev_ps = &pf->stats_prev; 4133fcea6f3dSAnirudh Venkataramanan cur_ps = &pf->stats; 4134fcea6f3dSAnirudh Venkataramanan 41359e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 413636517fd3SJacob Keller &prev_ps->eth.rx_bytes, 4137fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.rx_bytes); 4138fcea6f3dSAnirudh Venkataramanan 41399e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 414036517fd3SJacob Keller &prev_ps->eth.rx_unicast, 4141fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.rx_unicast); 4142fcea6f3dSAnirudh Venkataramanan 41439e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 414436517fd3SJacob Keller &prev_ps->eth.rx_multicast, 4145fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.rx_multicast); 4146fcea6f3dSAnirudh Venkataramanan 41479e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 414836517fd3SJacob Keller &prev_ps->eth.rx_broadcast, 4149fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.rx_broadcast); 4150fcea6f3dSAnirudh 
Venkataramanan 415156923ab6SBrett Creeley ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 415256923ab6SBrett Creeley &prev_ps->eth.rx_discards, 415356923ab6SBrett Creeley &cur_ps->eth.rx_discards); 415456923ab6SBrett Creeley 41559e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 415636517fd3SJacob Keller &prev_ps->eth.tx_bytes, 4157fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.tx_bytes); 4158fcea6f3dSAnirudh Venkataramanan 41599e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 416036517fd3SJacob Keller &prev_ps->eth.tx_unicast, 4161fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.tx_unicast); 4162fcea6f3dSAnirudh Venkataramanan 41639e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 416436517fd3SJacob Keller &prev_ps->eth.tx_multicast, 4165fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.tx_multicast); 4166fcea6f3dSAnirudh Venkataramanan 41679e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 416836517fd3SJacob Keller &prev_ps->eth.tx_broadcast, 4169fcea6f3dSAnirudh Venkataramanan &cur_ps->eth.tx_broadcast); 4170fcea6f3dSAnirudh Venkataramanan 41719e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 4172fcea6f3dSAnirudh Venkataramanan &prev_ps->tx_dropped_link_down, 4173fcea6f3dSAnirudh Venkataramanan &cur_ps->tx_dropped_link_down); 4174fcea6f3dSAnirudh Venkataramanan 41759e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 417636517fd3SJacob Keller &prev_ps->rx_size_64, &cur_ps->rx_size_64); 4177fcea6f3dSAnirudh Venkataramanan 41789e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 417936517fd3SJacob Keller &prev_ps->rx_size_127, &cur_ps->rx_size_127); 4180fcea6f3dSAnirudh Venkataramanan 41819e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 418236517fd3SJacob Keller &prev_ps->rx_size_255, 
&cur_ps->rx_size_255); 4183fcea6f3dSAnirudh Venkataramanan 41849e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 418536517fd3SJacob Keller &prev_ps->rx_size_511, &cur_ps->rx_size_511); 4186fcea6f3dSAnirudh Venkataramanan 41879e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 4188fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 4189fcea6f3dSAnirudh Venkataramanan 41909e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 4191fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 4192fcea6f3dSAnirudh Venkataramanan 41939e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 4194fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_size_big, &cur_ps->rx_size_big); 4195fcea6f3dSAnirudh Venkataramanan 41969e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 419736517fd3SJacob Keller &prev_ps->tx_size_64, &cur_ps->tx_size_64); 4198fcea6f3dSAnirudh Venkataramanan 41999e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 420036517fd3SJacob Keller &prev_ps->tx_size_127, &cur_ps->tx_size_127); 4201fcea6f3dSAnirudh Venkataramanan 42029e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 420336517fd3SJacob Keller &prev_ps->tx_size_255, &cur_ps->tx_size_255); 4204fcea6f3dSAnirudh Venkataramanan 42059e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 420636517fd3SJacob Keller &prev_ps->tx_size_511, &cur_ps->tx_size_511); 4207fcea6f3dSAnirudh Venkataramanan 42089e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 4209fcea6f3dSAnirudh Venkataramanan &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 4210fcea6f3dSAnirudh Venkataramanan 42119e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 
4212fcea6f3dSAnirudh Venkataramanan &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 4213fcea6f3dSAnirudh Venkataramanan 42149e7a5d17SUsha Ketineni ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 4215fcea6f3dSAnirudh Venkataramanan &prev_ps->tx_size_big, &cur_ps->tx_size_big); 4216fcea6f3dSAnirudh Venkataramanan 42179e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 4218fcea6f3dSAnirudh Venkataramanan &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 4219fcea6f3dSAnirudh Venkataramanan 42209e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 4221fcea6f3dSAnirudh Venkataramanan &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 4222fcea6f3dSAnirudh Venkataramanan 42239e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 4224fcea6f3dSAnirudh Venkataramanan &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 4225fcea6f3dSAnirudh Venkataramanan 42269e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 4227fcea6f3dSAnirudh Venkataramanan &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 4228fcea6f3dSAnirudh Venkataramanan 42294b0fdcebSAnirudh Venkataramanan ice_update_dcb_stats(pf); 42304b0fdcebSAnirudh Venkataramanan 42319e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 4232fcea6f3dSAnirudh Venkataramanan &prev_ps->crc_errors, &cur_ps->crc_errors); 4233fcea6f3dSAnirudh Venkataramanan 42349e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 4235fcea6f3dSAnirudh Venkataramanan &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 4236fcea6f3dSAnirudh Venkataramanan 42379e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 4238fcea6f3dSAnirudh Venkataramanan &prev_ps->mac_local_faults, 4239fcea6f3dSAnirudh Venkataramanan &cur_ps->mac_local_faults); 4240fcea6f3dSAnirudh Venkataramanan 42419e7a5d17SUsha Ketineni ice_stat_update32(hw, 
GLPRT_MRFC(port), pf->stat_prev_loaded, 4242fcea6f3dSAnirudh Venkataramanan &prev_ps->mac_remote_faults, 4243fcea6f3dSAnirudh Venkataramanan &cur_ps->mac_remote_faults); 4244fcea6f3dSAnirudh Venkataramanan 42459e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 4246fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 4247fcea6f3dSAnirudh Venkataramanan 42489e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 4249fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_undersize, &cur_ps->rx_undersize); 4250fcea6f3dSAnirudh Venkataramanan 42519e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 4252fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_fragments, &cur_ps->rx_fragments); 4253fcea6f3dSAnirudh Venkataramanan 42549e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 4255fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_oversize, &cur_ps->rx_oversize); 4256fcea6f3dSAnirudh Venkataramanan 42579e7a5d17SUsha Ketineni ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 4258fcea6f3dSAnirudh Venkataramanan &prev_ps->rx_jabber, &cur_ps->rx_jabber); 4259fcea6f3dSAnirudh Venkataramanan 4260fcea6f3dSAnirudh Venkataramanan pf->stat_prev_loaded = true; 4261fcea6f3dSAnirudh Venkataramanan } 4262fcea6f3dSAnirudh Venkataramanan 4263fcea6f3dSAnirudh Venkataramanan /** 4264fcea6f3dSAnirudh Venkataramanan * ice_get_stats64 - get statistics for network device structure 4265fcea6f3dSAnirudh Venkataramanan * @netdev: network interface device structure 4266fcea6f3dSAnirudh Venkataramanan * @stats: main device statistics structure 4267fcea6f3dSAnirudh Venkataramanan */ 4268fcea6f3dSAnirudh Venkataramanan static 4269fcea6f3dSAnirudh Venkataramanan void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 4270fcea6f3dSAnirudh Venkataramanan { 4271fcea6f3dSAnirudh Venkataramanan struct ice_netdev_priv *np = 
netdev_priv(netdev); 4272fcea6f3dSAnirudh Venkataramanan struct rtnl_link_stats64 *vsi_stats; 4273fcea6f3dSAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 4274fcea6f3dSAnirudh Venkataramanan 4275fcea6f3dSAnirudh Venkataramanan vsi_stats = &vsi->net_stats; 4276fcea6f3dSAnirudh Venkataramanan 42773d57fd10SDave Ertman if (!vsi->num_txq || !vsi->num_rxq) 4278fcea6f3dSAnirudh Venkataramanan return; 42793d57fd10SDave Ertman 4280fcea6f3dSAnirudh Venkataramanan /* netdev packet/byte stats come from ring counter. These are obtained 4281fcea6f3dSAnirudh Venkataramanan * by summing up ring counters (done by ice_update_vsi_ring_stats). 42823d57fd10SDave Ertman * But, only call the update routine and read the registers if VSI is 42833d57fd10SDave Ertman * not down. 4284fcea6f3dSAnirudh Venkataramanan */ 42853d57fd10SDave Ertman if (!test_bit(__ICE_DOWN, vsi->state)) 4286fcea6f3dSAnirudh Venkataramanan ice_update_vsi_ring_stats(vsi); 4287fcea6f3dSAnirudh Venkataramanan stats->tx_packets = vsi_stats->tx_packets; 4288fcea6f3dSAnirudh Venkataramanan stats->tx_bytes = vsi_stats->tx_bytes; 4289fcea6f3dSAnirudh Venkataramanan stats->rx_packets = vsi_stats->rx_packets; 4290fcea6f3dSAnirudh Venkataramanan stats->rx_bytes = vsi_stats->rx_bytes; 4291fcea6f3dSAnirudh Venkataramanan 4292fcea6f3dSAnirudh Venkataramanan /* The rest of the stats can be read from the hardware but instead we 4293fcea6f3dSAnirudh Venkataramanan * just return values that the watchdog task has already obtained from 4294fcea6f3dSAnirudh Venkataramanan * the hardware. 
4295fcea6f3dSAnirudh Venkataramanan */ 4296fcea6f3dSAnirudh Venkataramanan stats->multicast = vsi_stats->multicast; 4297fcea6f3dSAnirudh Venkataramanan stats->tx_errors = vsi_stats->tx_errors; 4298fcea6f3dSAnirudh Venkataramanan stats->tx_dropped = vsi_stats->tx_dropped; 4299fcea6f3dSAnirudh Venkataramanan stats->rx_errors = vsi_stats->rx_errors; 4300fcea6f3dSAnirudh Venkataramanan stats->rx_dropped = vsi_stats->rx_dropped; 4301fcea6f3dSAnirudh Venkataramanan stats->rx_crc_errors = vsi_stats->rx_crc_errors; 4302fcea6f3dSAnirudh Venkataramanan stats->rx_length_errors = vsi_stats->rx_length_errors; 4303fcea6f3dSAnirudh Venkataramanan } 4304fcea6f3dSAnirudh Venkataramanan 4305fcea6f3dSAnirudh Venkataramanan /** 43062b245cb2SAnirudh Venkataramanan * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 43072b245cb2SAnirudh Venkataramanan * @vsi: VSI having NAPI disabled 43082b245cb2SAnirudh Venkataramanan */ 43092b245cb2SAnirudh Venkataramanan static void ice_napi_disable_all(struct ice_vsi *vsi) 43102b245cb2SAnirudh Venkataramanan { 43112b245cb2SAnirudh Venkataramanan int q_idx; 43122b245cb2SAnirudh Venkataramanan 43132b245cb2SAnirudh Venkataramanan if (!vsi->netdev) 43142b245cb2SAnirudh Venkataramanan return; 43152b245cb2SAnirudh Venkataramanan 43160c2561c8SBrett Creeley ice_for_each_q_vector(vsi, q_idx) { 4317eec90376SYoung Xiao struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 4318eec90376SYoung Xiao 4319eec90376SYoung Xiao if (q_vector->rx.ring || q_vector->tx.ring) 4320eec90376SYoung Xiao napi_disable(&q_vector->napi); 4321eec90376SYoung Xiao } 43222b245cb2SAnirudh Venkataramanan } 43232b245cb2SAnirudh Venkataramanan 43242b245cb2SAnirudh Venkataramanan /** 4325cdedef59SAnirudh Venkataramanan * ice_down - Shutdown the connection 4326cdedef59SAnirudh Venkataramanan * @vsi: The VSI being stopped 4327cdedef59SAnirudh Venkataramanan */ 4328fcea6f3dSAnirudh Venkataramanan int ice_down(struct ice_vsi *vsi) 4329cdedef59SAnirudh Venkataramanan { 
4330ab4ab73fSBruce Allan int i, tx_err, rx_err, link_err = 0; 4331cdedef59SAnirudh Venkataramanan 4332cdedef59SAnirudh Venkataramanan /* Caller of this function is expected to set the 4333cdedef59SAnirudh Venkataramanan * vsi->state __ICE_DOWN bit 4334cdedef59SAnirudh Venkataramanan */ 4335cdedef59SAnirudh Venkataramanan if (vsi->netdev) { 4336cdedef59SAnirudh Venkataramanan netif_carrier_off(vsi->netdev); 4337cdedef59SAnirudh Venkataramanan netif_tx_disable(vsi->netdev); 4338cdedef59SAnirudh Venkataramanan } 4339cdedef59SAnirudh Venkataramanan 4340cdedef59SAnirudh Venkataramanan ice_vsi_dis_irq(vsi); 434103f7a986SAnirudh Venkataramanan 434203f7a986SAnirudh Venkataramanan tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 434372adf242SAnirudh Venkataramanan if (tx_err) 434419cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", 434572adf242SAnirudh Venkataramanan vsi->vsi_num, tx_err); 4346efc2214bSMaciej Fijalkowski if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { 4347efc2214bSMaciej Fijalkowski tx_err = ice_vsi_stop_xdp_tx_rings(vsi); 4348efc2214bSMaciej Fijalkowski if (tx_err) 434919cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", 4350efc2214bSMaciej Fijalkowski vsi->vsi_num, tx_err); 4351efc2214bSMaciej Fijalkowski } 435272adf242SAnirudh Venkataramanan 435313a6233bSBrett Creeley rx_err = ice_vsi_stop_all_rx_rings(vsi); 435472adf242SAnirudh Venkataramanan if (rx_err) 435519cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", 435672adf242SAnirudh Venkataramanan vsi->vsi_num, rx_err); 435772adf242SAnirudh Venkataramanan 43582b245cb2SAnirudh Venkataramanan ice_napi_disable_all(vsi); 4359cdedef59SAnirudh Venkataramanan 4360ab4ab73fSBruce Allan if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { 4361b6f934f0SBrett Creeley link_err = ice_force_phys_link_state(vsi, false); 4362b6f934f0SBrett Creeley if (link_err) 
436319cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", 4364b6f934f0SBrett Creeley vsi->vsi_num, link_err); 4365ab4ab73fSBruce Allan } 4366b6f934f0SBrett Creeley 4367cdedef59SAnirudh Venkataramanan ice_for_each_txq(vsi, i) 4368cdedef59SAnirudh Venkataramanan ice_clean_tx_ring(vsi->tx_rings[i]); 4369cdedef59SAnirudh Venkataramanan 4370cdedef59SAnirudh Venkataramanan ice_for_each_rxq(vsi, i) 4371cdedef59SAnirudh Venkataramanan ice_clean_rx_ring(vsi->rx_rings[i]); 4372cdedef59SAnirudh Venkataramanan 4373b6f934f0SBrett Creeley if (tx_err || rx_err || link_err) { 437419cce2c6SAnirudh Venkataramanan netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 4375cdedef59SAnirudh Venkataramanan vsi->vsi_num, vsi->vsw->sw_id); 437672adf242SAnirudh Venkataramanan return -EIO; 437772adf242SAnirudh Venkataramanan } 437872adf242SAnirudh Venkataramanan 437972adf242SAnirudh Venkataramanan return 0; 4380cdedef59SAnirudh Venkataramanan } 4381cdedef59SAnirudh Venkataramanan 4382cdedef59SAnirudh Venkataramanan /** 4383cdedef59SAnirudh Venkataramanan * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 4384cdedef59SAnirudh Venkataramanan * @vsi: VSI having resources allocated 4385cdedef59SAnirudh Venkataramanan * 4386cdedef59SAnirudh Venkataramanan * Return 0 on success, negative on failure 4387cdedef59SAnirudh Venkataramanan */ 43880e674aebSAnirudh Venkataramanan int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 4389cdedef59SAnirudh Venkataramanan { 4390dab0588fSJesse Brandeburg int i, err = 0; 4391cdedef59SAnirudh Venkataramanan 4392cdedef59SAnirudh Venkataramanan if (!vsi->num_txq) { 43939a946843SAnirudh Venkataramanan dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 4394cdedef59SAnirudh Venkataramanan vsi->vsi_num); 4395cdedef59SAnirudh Venkataramanan return -EINVAL; 4396cdedef59SAnirudh Venkataramanan } 4397cdedef59SAnirudh Venkataramanan 4398cdedef59SAnirudh Venkataramanan 
ice_for_each_txq(vsi, i) { 4399eb0ee8abSMichal Swiatkowski struct ice_ring *ring = vsi->tx_rings[i]; 4400eb0ee8abSMichal Swiatkowski 4401eb0ee8abSMichal Swiatkowski if (!ring) 4402eb0ee8abSMichal Swiatkowski return -EINVAL; 4403eb0ee8abSMichal Swiatkowski 4404eb0ee8abSMichal Swiatkowski ring->netdev = vsi->netdev; 4405eb0ee8abSMichal Swiatkowski err = ice_setup_tx_ring(ring); 4406cdedef59SAnirudh Venkataramanan if (err) 4407cdedef59SAnirudh Venkataramanan break; 4408cdedef59SAnirudh Venkataramanan } 4409cdedef59SAnirudh Venkataramanan 4410cdedef59SAnirudh Venkataramanan return err; 4411cdedef59SAnirudh Venkataramanan } 4412cdedef59SAnirudh Venkataramanan 4413cdedef59SAnirudh Venkataramanan /** 4414cdedef59SAnirudh Venkataramanan * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 4415cdedef59SAnirudh Venkataramanan * @vsi: VSI having resources allocated 4416cdedef59SAnirudh Venkataramanan * 4417cdedef59SAnirudh Venkataramanan * Return 0 on success, negative on failure 4418cdedef59SAnirudh Venkataramanan */ 44190e674aebSAnirudh Venkataramanan int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 4420cdedef59SAnirudh Venkataramanan { 4421dab0588fSJesse Brandeburg int i, err = 0; 4422cdedef59SAnirudh Venkataramanan 4423cdedef59SAnirudh Venkataramanan if (!vsi->num_rxq) { 44249a946843SAnirudh Venkataramanan dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 4425cdedef59SAnirudh Venkataramanan vsi->vsi_num); 4426cdedef59SAnirudh Venkataramanan return -EINVAL; 4427cdedef59SAnirudh Venkataramanan } 4428cdedef59SAnirudh Venkataramanan 4429cdedef59SAnirudh Venkataramanan ice_for_each_rxq(vsi, i) { 4430eb0ee8abSMichal Swiatkowski struct ice_ring *ring = vsi->rx_rings[i]; 4431eb0ee8abSMichal Swiatkowski 4432eb0ee8abSMichal Swiatkowski if (!ring) 4433eb0ee8abSMichal Swiatkowski return -EINVAL; 4434eb0ee8abSMichal Swiatkowski 4435eb0ee8abSMichal Swiatkowski ring->netdev = vsi->netdev; 4436eb0ee8abSMichal Swiatkowski err = ice_setup_rx_ring(ring); 
4437cdedef59SAnirudh Venkataramanan if (err) 4438cdedef59SAnirudh Venkataramanan break; 4439cdedef59SAnirudh Venkataramanan } 4440cdedef59SAnirudh Venkataramanan 4441cdedef59SAnirudh Venkataramanan return err; 4442cdedef59SAnirudh Venkataramanan } 4443cdedef59SAnirudh Venkataramanan 4444cdedef59SAnirudh Venkataramanan /** 4445cdedef59SAnirudh Venkataramanan * ice_vsi_open - Called when a network interface is made active 4446cdedef59SAnirudh Venkataramanan * @vsi: the VSI to open 4447cdedef59SAnirudh Venkataramanan * 4448cdedef59SAnirudh Venkataramanan * Initialization of the VSI 4449cdedef59SAnirudh Venkataramanan * 4450cdedef59SAnirudh Venkataramanan * Returns 0 on success, negative value on error 4451cdedef59SAnirudh Venkataramanan */ 4452cdedef59SAnirudh Venkataramanan static int ice_vsi_open(struct ice_vsi *vsi) 4453cdedef59SAnirudh Venkataramanan { 4454cdedef59SAnirudh Venkataramanan char int_name[ICE_INT_NAME_STR_LEN]; 4455cdedef59SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 4456cdedef59SAnirudh Venkataramanan int err; 4457cdedef59SAnirudh Venkataramanan 4458cdedef59SAnirudh Venkataramanan /* allocate descriptors */ 4459cdedef59SAnirudh Venkataramanan err = ice_vsi_setup_tx_rings(vsi); 4460cdedef59SAnirudh Venkataramanan if (err) 4461cdedef59SAnirudh Venkataramanan goto err_setup_tx; 4462cdedef59SAnirudh Venkataramanan 4463cdedef59SAnirudh Venkataramanan err = ice_vsi_setup_rx_rings(vsi); 4464cdedef59SAnirudh Venkataramanan if (err) 4465cdedef59SAnirudh Venkataramanan goto err_setup_rx; 4466cdedef59SAnirudh Venkataramanan 4467cdedef59SAnirudh Venkataramanan err = ice_vsi_cfg(vsi); 4468cdedef59SAnirudh Venkataramanan if (err) 4469cdedef59SAnirudh Venkataramanan goto err_setup_rx; 4470cdedef59SAnirudh Venkataramanan 4471cdedef59SAnirudh Venkataramanan snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 44724015d11eSBrett Creeley dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 4473ba880734SBrett Creeley err = ice_vsi_req_irq_msix(vsi, 
int_name); 4474cdedef59SAnirudh Venkataramanan if (err) 4475cdedef59SAnirudh Venkataramanan goto err_setup_rx; 4476cdedef59SAnirudh Venkataramanan 4477cdedef59SAnirudh Venkataramanan /* Notify the stack of the actual queue counts. */ 4478cdedef59SAnirudh Venkataramanan err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 4479cdedef59SAnirudh Venkataramanan if (err) 4480cdedef59SAnirudh Venkataramanan goto err_set_qs; 4481cdedef59SAnirudh Venkataramanan 4482cdedef59SAnirudh Venkataramanan err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 4483cdedef59SAnirudh Venkataramanan if (err) 4484cdedef59SAnirudh Venkataramanan goto err_set_qs; 4485cdedef59SAnirudh Venkataramanan 4486cdedef59SAnirudh Venkataramanan err = ice_up_complete(vsi); 4487cdedef59SAnirudh Venkataramanan if (err) 4488cdedef59SAnirudh Venkataramanan goto err_up_complete; 4489cdedef59SAnirudh Venkataramanan 4490cdedef59SAnirudh Venkataramanan return 0; 4491cdedef59SAnirudh Venkataramanan 4492cdedef59SAnirudh Venkataramanan err_up_complete: 4493cdedef59SAnirudh Venkataramanan ice_down(vsi); 4494cdedef59SAnirudh Venkataramanan err_set_qs: 4495cdedef59SAnirudh Venkataramanan ice_vsi_free_irq(vsi); 4496cdedef59SAnirudh Venkataramanan err_setup_rx: 4497cdedef59SAnirudh Venkataramanan ice_vsi_free_rx_rings(vsi); 4498cdedef59SAnirudh Venkataramanan err_setup_tx: 4499cdedef59SAnirudh Venkataramanan ice_vsi_free_tx_rings(vsi); 4500cdedef59SAnirudh Venkataramanan 4501cdedef59SAnirudh Venkataramanan return err; 4502cdedef59SAnirudh Venkataramanan } 4503cdedef59SAnirudh Venkataramanan 4504cdedef59SAnirudh Venkataramanan /** 45050f9d5027SAnirudh Venkataramanan * ice_vsi_release_all - Delete all VSIs 45060f9d5027SAnirudh Venkataramanan * @pf: PF from which all VSIs are being removed 45070f9d5027SAnirudh Venkataramanan */ 45080f9d5027SAnirudh Venkataramanan static void ice_vsi_release_all(struct ice_pf *pf) 45090f9d5027SAnirudh Venkataramanan { 45100f9d5027SAnirudh Venkataramanan int err, i; 
	if (!pf->vsi)
		return;

	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;

		err = ice_vsi_release(pf->vsi[i]);
		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
				i, err, pf->vsi[i]->vsi_num);
	}
}

/**
 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
 * @pf: pointer to the PF instance
 * @type: VSI type to rebuild
 *
 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
 *
 * Returns 0 on success; a negative errno from the first VSI that fails to
 * rebuild, replay, or enable (remaining VSIs of the type are not processed).
 */
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_status status;
	int i, err;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi || vsi->type != type)
			continue;

		/* rebuild the VSI */
		err = ice_vsi_rebuild(vsi, true);
		if (err) {
			dev_err(dev,
				"rebuild VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		/* replay filters for the VSI */
		status = ice_replay_vsi(&pf->hw, vsi->idx);
		if (status) {
			dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
				status, vsi->idx, ice_vsi_type_str(type));
			return -EIO;
		}

		/* Re-map HW VSI number, using VSI handle that has been
		 * previously validated in ice_replay_vsi() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* enable the VSI */
		err = ice_ena_vsi(vsi, false);
		if (err) {
			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
			 ice_vsi_type_str(type));
	}

	return 0;
}

/**
 * ice_update_pf_netdev_link - Update PF netdev link status
 * @pf: pointer to the PF instance
 *
 * Syncs netdev carrier state and Tx queue state of each PF VSI's netdev to
 * the current link status reported by its port.
 */
static void ice_update_pf_netdev_link(struct ice_pf *pf)
{
	bool link_up;
	int i;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		/* stop at the first hole or non-PF VSI in the array */
		if (!vsi || vsi->type != ICE_VSI_PF)
			return;

		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
		if (link_up) {
			netif_carrier_on(pf->vsi[i]->netdev);
			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
		} else {
			netif_carrier_off(pf->vsi[i]->netdev);
			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
		}
	}
}

/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 * @reset_type: type of reset
 *
 * Re-initializes control queues, reloads/refills the DDP package state,
 * re-reads capabilities, restarts the misc interrupt vector, and rebuilds
 * PF (and, with SR-IOV, VF) VSIs. On any failure, tears down what was set
 * up and marks the PF as needing a driver reload.
 */
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	enum ice_status ret;
	int err;

	if (test_bit(__ICE_DOWN, pf->state))
		goto clear_recovery;

	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);

	ret = ice_init_all_ctrlq(hw);
	if (ret) {
		dev_err(dev, "control queues init failed %d\n", ret);
		goto err_init_ctrlq;
	}

	/* if DDP was previously loaded successfully */
	if (!ice_is_safe_mode(pf)) {
		/* reload the SW DB of filter tables */
		if (reset_type == ICE_RESET_PFR)
			ice_fill_blk_tbls(hw);
		else
			/* Reload DDP Package after CORER/GLOBR reset */
			ice_load_pkg(NULL, pf);
	}

	ret = ice_clear_pf_cfg(hw);
	if (ret) {
		dev_err(dev, "clear PF configuration failed %d\n", ret);
		goto err_init_ctrlq;
	}

	if (pf->first_sw->dflt_vsi_ena)
		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
	/* clear the default VSI configuration if it exists */
	pf->first_sw->dflt_vsi = NULL;
	pf->first_sw->dflt_vsi_ena = false;

	ice_clear_pxe_mode(hw);

	ret = ice_get_caps(hw);
	if (ret) {
		dev_err(dev, "ice_get_caps failed %d\n", ret);
		goto err_init_ctrlq;
	}

	err = ice_sched_init_port(hw->port_info);
	if (err)
		goto err_sched_init_port;

	/* link info failure is logged but not fatal to the rebuild */
	err = ice_update_link_info(hw->port_info);
	if (err)
		dev_err(dev, "Get link status error %d\n", err);

	/* start misc vector */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "misc vector setup failed: %d\n", err);
		goto err_sched_init_port;
	}

	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		ice_dcb_rebuild(pf);

	/* rebuild PF VSI */
	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
	if (err) {
		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
		goto err_vsi_rebuild;
	}

	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF);
		if (err) {
			dev_err(dev, "VF VSI rebuild failed: %d\n", err);
			goto err_vsi_rebuild;
		}
	}

	ice_update_pf_netdev_link(pf);

	/* tell the firmware we are up */
	ret = ice_send_version(pf);
	if (ret) {
		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
			ret);
		goto err_vsi_rebuild;
	}

	ice_replay_post(hw);

	/* if we get here, reset flow is successful */
	clear_bit(__ICE_RESET_FAILED, pf->state);
	return;

	/* error labels deliberately fall through: a late failure also runs
	 * the scheduler cleanup and control queue shutdown below
	 */
err_vsi_rebuild:
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw);
	set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(__ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}

/**
 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: Pointer to VSI structure
 */
static int
ice_max_xdp_frame_size(struct ice_vsi *vsi) 472623b44513SMaciej Fijalkowski { 472723b44513SMaciej Fijalkowski if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) 472823b44513SMaciej Fijalkowski return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; 472923b44513SMaciej Fijalkowski else 473023b44513SMaciej Fijalkowski return ICE_RXBUF_3072; 473123b44513SMaciej Fijalkowski } 473223b44513SMaciej Fijalkowski 473323b44513SMaciej Fijalkowski /** 4734e94d4478SAnirudh Venkataramanan * ice_change_mtu - NDO callback to change the MTU 4735e94d4478SAnirudh Venkataramanan * @netdev: network interface device structure 4736e94d4478SAnirudh Venkataramanan * @new_mtu: new value for maximum frame size 4737e94d4478SAnirudh Venkataramanan * 4738e94d4478SAnirudh Venkataramanan * Returns 0 on success, negative on failure 4739e94d4478SAnirudh Venkataramanan */ 4740e94d4478SAnirudh Venkataramanan static int ice_change_mtu(struct net_device *netdev, int new_mtu) 4741e94d4478SAnirudh Venkataramanan { 4742e94d4478SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 4743e94d4478SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 4744e94d4478SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 4745e94d4478SAnirudh Venkataramanan u8 count = 0; 4746e94d4478SAnirudh Venkataramanan 4747e94d4478SAnirudh Venkataramanan if (new_mtu == netdev->mtu) { 47482f2da36eSAnirudh Venkataramanan netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 4749e94d4478SAnirudh Venkataramanan return 0; 4750e94d4478SAnirudh Venkataramanan } 4751e94d4478SAnirudh Venkataramanan 4752efc2214bSMaciej Fijalkowski if (ice_is_xdp_ena_vsi(vsi)) { 475323b44513SMaciej Fijalkowski int frame_size = ice_max_xdp_frame_size(vsi); 4754efc2214bSMaciej Fijalkowski 4755efc2214bSMaciej Fijalkowski if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 4756efc2214bSMaciej Fijalkowski netdev_err(netdev, "max MTU for XDP usage is %d\n", 475723b44513SMaciej Fijalkowski frame_size - ICE_ETH_PKT_HDR_PAD); 
4758efc2214bSMaciej Fijalkowski return -EINVAL; 4759efc2214bSMaciej Fijalkowski } 4760efc2214bSMaciej Fijalkowski } 4761efc2214bSMaciej Fijalkowski 4762e94d4478SAnirudh Venkataramanan if (new_mtu < netdev->min_mtu) { 47632f2da36eSAnirudh Venkataramanan netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", 4764e94d4478SAnirudh Venkataramanan netdev->min_mtu); 4765e94d4478SAnirudh Venkataramanan return -EINVAL; 4766e94d4478SAnirudh Venkataramanan } else if (new_mtu > netdev->max_mtu) { 47672f2da36eSAnirudh Venkataramanan netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", 4768e94d4478SAnirudh Venkataramanan netdev->min_mtu); 4769e94d4478SAnirudh Venkataramanan return -EINVAL; 4770e94d4478SAnirudh Venkataramanan } 4771e94d4478SAnirudh Venkataramanan /* if a reset is in progress, wait for some time for it to complete */ 4772e94d4478SAnirudh Venkataramanan do { 47735df7e45dSDave Ertman if (ice_is_reset_in_progress(pf->state)) { 4774e94d4478SAnirudh Venkataramanan count++; 4775e94d4478SAnirudh Venkataramanan usleep_range(1000, 2000); 4776e94d4478SAnirudh Venkataramanan } else { 4777e94d4478SAnirudh Venkataramanan break; 4778e94d4478SAnirudh Venkataramanan } 4779e94d4478SAnirudh Venkataramanan 4780e94d4478SAnirudh Venkataramanan } while (count < 100); 4781e94d4478SAnirudh Venkataramanan 4782e94d4478SAnirudh Venkataramanan if (count == 100) { 47832f2da36eSAnirudh Venkataramanan netdev_err(netdev, "can't change MTU. 
Device is busy\n"); 4784e94d4478SAnirudh Venkataramanan return -EBUSY; 4785e94d4478SAnirudh Venkataramanan } 4786e94d4478SAnirudh Venkataramanan 4787e94d4478SAnirudh Venkataramanan netdev->mtu = new_mtu; 4788e94d4478SAnirudh Venkataramanan 4789e94d4478SAnirudh Venkataramanan /* if VSI is up, bring it down and then back up */ 4790e94d4478SAnirudh Venkataramanan if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { 4791e94d4478SAnirudh Venkataramanan int err; 4792e94d4478SAnirudh Venkataramanan 4793e94d4478SAnirudh Venkataramanan err = ice_down(vsi); 4794e94d4478SAnirudh Venkataramanan if (err) { 47952f2da36eSAnirudh Venkataramanan netdev_err(netdev, "change MTU if_up err %d\n", err); 4796e94d4478SAnirudh Venkataramanan return err; 4797e94d4478SAnirudh Venkataramanan } 4798e94d4478SAnirudh Venkataramanan 4799e94d4478SAnirudh Venkataramanan err = ice_up(vsi); 4800e94d4478SAnirudh Venkataramanan if (err) { 48012f2da36eSAnirudh Venkataramanan netdev_err(netdev, "change MTU if_up err %d\n", err); 4802e94d4478SAnirudh Venkataramanan return err; 4803e94d4478SAnirudh Venkataramanan } 4804e94d4478SAnirudh Venkataramanan } 4805e94d4478SAnirudh Venkataramanan 4806bda5b7dbSTony Nguyen netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); 4807e94d4478SAnirudh Venkataramanan return 0; 4808e94d4478SAnirudh Venkataramanan } 4809e94d4478SAnirudh Venkataramanan 4810e94d4478SAnirudh Venkataramanan /** 4811d76a60baSAnirudh Venkataramanan * ice_set_rss - Set RSS keys and lut 4812d76a60baSAnirudh Venkataramanan * @vsi: Pointer to VSI structure 4813d76a60baSAnirudh Venkataramanan * @seed: RSS hash seed 4814d76a60baSAnirudh Venkataramanan * @lut: Lookup table 4815d76a60baSAnirudh Venkataramanan * @lut_size: Lookup table size 4816d76a60baSAnirudh Venkataramanan * 4817d76a60baSAnirudh Venkataramanan * Returns 0 on success, negative on failure 4818d76a60baSAnirudh Venkataramanan */ 4819d76a60baSAnirudh Venkataramanan int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 
4820d76a60baSAnirudh Venkataramanan { 4821d76a60baSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 4822d76a60baSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 4823d76a60baSAnirudh Venkataramanan enum ice_status status; 48244015d11eSBrett Creeley struct device *dev; 4825d76a60baSAnirudh Venkataramanan 48264015d11eSBrett Creeley dev = ice_pf_to_dev(pf); 4827d76a60baSAnirudh Venkataramanan if (seed) { 4828d76a60baSAnirudh Venkataramanan struct ice_aqc_get_set_rss_keys *buf = 4829d76a60baSAnirudh Venkataramanan (struct ice_aqc_get_set_rss_keys *)seed; 4830d76a60baSAnirudh Venkataramanan 48314fb33f31SAnirudh Venkataramanan status = ice_aq_set_rss_key(hw, vsi->idx, buf); 4832d76a60baSAnirudh Venkataramanan 4833d76a60baSAnirudh Venkataramanan if (status) { 48344015d11eSBrett Creeley dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n", 4835d76a60baSAnirudh Venkataramanan status, hw->adminq.rq_last_status); 4836d76a60baSAnirudh Venkataramanan return -EIO; 4837d76a60baSAnirudh Venkataramanan } 4838d76a60baSAnirudh Venkataramanan } 4839d76a60baSAnirudh Venkataramanan 4840d76a60baSAnirudh Venkataramanan if (lut) { 48414fb33f31SAnirudh Venkataramanan status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 48424fb33f31SAnirudh Venkataramanan lut, lut_size); 4843d76a60baSAnirudh Venkataramanan if (status) { 48444015d11eSBrett Creeley dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n", 4845d76a60baSAnirudh Venkataramanan status, hw->adminq.rq_last_status); 4846d76a60baSAnirudh Venkataramanan return -EIO; 4847d76a60baSAnirudh Venkataramanan } 4848d76a60baSAnirudh Venkataramanan } 4849d76a60baSAnirudh Venkataramanan 4850d76a60baSAnirudh Venkataramanan return 0; 4851d76a60baSAnirudh Venkataramanan } 4852d76a60baSAnirudh Venkataramanan 4853d76a60baSAnirudh Venkataramanan /** 4854d76a60baSAnirudh Venkataramanan * ice_get_rss - Get RSS keys and lut 4855d76a60baSAnirudh Venkataramanan * @vsi: Pointer to VSI structure 4856d76a60baSAnirudh Venkataramanan * @seed: 
Buffer to store the keys 4857d76a60baSAnirudh Venkataramanan * @lut: Buffer to store the lookup table entries 4858d76a60baSAnirudh Venkataramanan * @lut_size: Size of buffer to store the lookup table entries 4859d76a60baSAnirudh Venkataramanan * 4860d76a60baSAnirudh Venkataramanan * Returns 0 on success, negative on failure 4861d76a60baSAnirudh Venkataramanan */ 4862d76a60baSAnirudh Venkataramanan int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 4863d76a60baSAnirudh Venkataramanan { 4864d76a60baSAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 4865d76a60baSAnirudh Venkataramanan struct ice_hw *hw = &pf->hw; 4866d76a60baSAnirudh Venkataramanan enum ice_status status; 48674015d11eSBrett Creeley struct device *dev; 4868d76a60baSAnirudh Venkataramanan 48694015d11eSBrett Creeley dev = ice_pf_to_dev(pf); 4870d76a60baSAnirudh Venkataramanan if (seed) { 4871d76a60baSAnirudh Venkataramanan struct ice_aqc_get_set_rss_keys *buf = 4872d76a60baSAnirudh Venkataramanan (struct ice_aqc_get_set_rss_keys *)seed; 4873d76a60baSAnirudh Venkataramanan 48744fb33f31SAnirudh Venkataramanan status = ice_aq_get_rss_key(hw, vsi->idx, buf); 4875d76a60baSAnirudh Venkataramanan if (status) { 48764015d11eSBrett Creeley dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n", 4877d76a60baSAnirudh Venkataramanan status, hw->adminq.rq_last_status); 4878d76a60baSAnirudh Venkataramanan return -EIO; 4879d76a60baSAnirudh Venkataramanan } 4880d76a60baSAnirudh Venkataramanan } 4881d76a60baSAnirudh Venkataramanan 4882d76a60baSAnirudh Venkataramanan if (lut) { 48834fb33f31SAnirudh Venkataramanan status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 48844fb33f31SAnirudh Venkataramanan lut, lut_size); 4885d76a60baSAnirudh Venkataramanan if (status) { 48864015d11eSBrett Creeley dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n", 4887d76a60baSAnirudh Venkataramanan status, hw->adminq.rq_last_status); 4888d76a60baSAnirudh Venkataramanan return -EIO; 4889d76a60baSAnirudh 
Venkataramanan } 4890d76a60baSAnirudh Venkataramanan } 4891d76a60baSAnirudh Venkataramanan 4892d76a60baSAnirudh Venkataramanan return 0; 4893d76a60baSAnirudh Venkataramanan } 4894d76a60baSAnirudh Venkataramanan 4895d76a60baSAnirudh Venkataramanan /** 4896b1edc14aSMd Fahad Iqbal Polash * ice_bridge_getlink - Get the hardware bridge mode 4897b1edc14aSMd Fahad Iqbal Polash * @skb: skb buff 4898f9867df6SAnirudh Venkataramanan * @pid: process ID 4899b1edc14aSMd Fahad Iqbal Polash * @seq: RTNL message seq 4900b1edc14aSMd Fahad Iqbal Polash * @dev: the netdev being configured 4901b1edc14aSMd Fahad Iqbal Polash * @filter_mask: filter mask passed in 4902b1edc14aSMd Fahad Iqbal Polash * @nlflags: netlink flags passed in 4903b1edc14aSMd Fahad Iqbal Polash * 4904b1edc14aSMd Fahad Iqbal Polash * Return the bridge mode (VEB/VEPA) 4905b1edc14aSMd Fahad Iqbal Polash */ 4906b1edc14aSMd Fahad Iqbal Polash static int 4907b1edc14aSMd Fahad Iqbal Polash ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4908b1edc14aSMd Fahad Iqbal Polash struct net_device *dev, u32 filter_mask, int nlflags) 4909b1edc14aSMd Fahad Iqbal Polash { 4910b1edc14aSMd Fahad Iqbal Polash struct ice_netdev_priv *np = netdev_priv(dev); 4911b1edc14aSMd Fahad Iqbal Polash struct ice_vsi *vsi = np->vsi; 4912b1edc14aSMd Fahad Iqbal Polash struct ice_pf *pf = vsi->back; 4913b1edc14aSMd Fahad Iqbal Polash u16 bmode; 4914b1edc14aSMd Fahad Iqbal Polash 4915b1edc14aSMd Fahad Iqbal Polash bmode = pf->first_sw->bridge_mode; 4916b1edc14aSMd Fahad Iqbal Polash 4917b1edc14aSMd Fahad Iqbal Polash return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 4918b1edc14aSMd Fahad Iqbal Polash filter_mask, NULL); 4919b1edc14aSMd Fahad Iqbal Polash } 4920b1edc14aSMd Fahad Iqbal Polash 4921b1edc14aSMd Fahad Iqbal Polash /** 4922b1edc14aSMd Fahad Iqbal Polash * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 4923b1edc14aSMd Fahad Iqbal Polash * @vsi: Pointer to VSI structure 
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	vsi_props = &vsi->info;

	/* work on a heap copy of the VSI context; commit to vsi->info only
	 * after the firmware update succeeds
	 */
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
			bmode, status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}
	/* Update sw flags for book keeping */
	vsi_props->sw_flags = ctxt->info.sw_flags;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up to. Iterates through the PF VSI list and sets the loopback mode
 * (if not already set) for all VSIs connected to this switch, and also
 * updates the unicast switch filter rules for the corresponding switch of
 * the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags,
		   struct netlink_ext_ack __always_unused *extack)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterates through the PF VSI list and update the loopback
		 * mode of the VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		status = ice_update_sw_rule_bridge_mode(hw);
		if (status) {
			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
				   mode, status, hw->adminq.sq_last_status);
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return -EIO;
		}

		pf_sw->bridge_mode = mode;
	}

	return 0;
}

/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: index of the timed-out Tx queue
 */
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_txq; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	/* dump queue head/tail and interrupt state for the hung ring */
	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	/* escalate the requested reset on repeated timeouts:
	 * level 1 -> PF reset, 2 -> core reset, 3 -> global reset,
	 * beyond that mark the device down/unrecoverable
	 */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(__ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(__ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(__ICE_DOWN, pf->state);
		set_bit(__ICE_NEEDS_RESTART, vsi->state);
		set_bit(__ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
5117cdedef59SAnirudh Venkataramanan * 5118cdedef59SAnirudh Venkataramanan * Returns 0 on success, negative value on failure 5119cdedef59SAnirudh Venkataramanan */ 51200e674aebSAnirudh Venkataramanan int ice_open(struct net_device *netdev) 5121cdedef59SAnirudh Venkataramanan { 5122cdedef59SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 5123cdedef59SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 51246d599946STony Nguyen struct ice_port_info *pi; 5125cdedef59SAnirudh Venkataramanan int err; 5126cdedef59SAnirudh Venkataramanan 51270f9d5027SAnirudh Venkataramanan if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { 51280f9d5027SAnirudh Venkataramanan netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 51290f9d5027SAnirudh Venkataramanan return -EIO; 51300f9d5027SAnirudh Venkataramanan } 51310f9d5027SAnirudh Venkataramanan 5132cdedef59SAnirudh Venkataramanan netif_carrier_off(netdev); 5133cdedef59SAnirudh Venkataramanan 51346d599946STony Nguyen pi = vsi->port_info; 51356d599946STony Nguyen err = ice_update_link_info(pi); 51366d599946STony Nguyen if (err) { 51376d599946STony Nguyen netdev_err(netdev, "Failed to get link info, error %d\n", 51386d599946STony Nguyen err); 51396d599946STony Nguyen return err; 51406d599946STony Nguyen } 51416d599946STony Nguyen 51426d599946STony Nguyen /* Set PHY if there is media, otherwise, turn off PHY */ 51436d599946STony Nguyen if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 5144b6f934f0SBrett Creeley err = ice_force_phys_link_state(vsi, true); 5145b6f934f0SBrett Creeley if (err) { 514619cce2c6SAnirudh Venkataramanan netdev_err(netdev, "Failed to set physical link up, error %d\n", 51476d599946STony Nguyen err); 5148b6f934f0SBrett Creeley return err; 5149b6f934f0SBrett Creeley } 51506d599946STony Nguyen } else { 51516d599946STony Nguyen err = ice_aq_set_link_restart_an(pi, false, NULL); 51526d599946STony Nguyen if (err) { 51536d599946STony Nguyen netdev_err(netdev, "Failed to set 
PHY state, VSI %d error %d\n", 51546d599946STony Nguyen vsi->vsi_num, err); 51556d599946STony Nguyen return err; 51566d599946STony Nguyen } 51576d599946STony Nguyen set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags); 51586d599946STony Nguyen } 5159cdedef59SAnirudh Venkataramanan 5160b6f934f0SBrett Creeley err = ice_vsi_open(vsi); 5161cdedef59SAnirudh Venkataramanan if (err) 5162cdedef59SAnirudh Venkataramanan netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 5163cdedef59SAnirudh Venkataramanan vsi->vsi_num, vsi->vsw->sw_id); 5164cdedef59SAnirudh Venkataramanan return err; 5165cdedef59SAnirudh Venkataramanan } 5166cdedef59SAnirudh Venkataramanan 5167cdedef59SAnirudh Venkataramanan /** 5168cdedef59SAnirudh Venkataramanan * ice_stop - Disables a network interface 5169cdedef59SAnirudh Venkataramanan * @netdev: network interface device structure 5170cdedef59SAnirudh Venkataramanan * 5171cdedef59SAnirudh Venkataramanan * The stop entry point is called when an interface is de-activated by the OS, 5172cdedef59SAnirudh Venkataramanan * and the netdevice enters the DOWN state. The hardware is still under the 5173cdedef59SAnirudh Venkataramanan * driver's control, but the netdev interface is disabled. 
5174cdedef59SAnirudh Venkataramanan * 5175cdedef59SAnirudh Venkataramanan * Returns success only - not allowed to fail 5176cdedef59SAnirudh Venkataramanan */ 51770e674aebSAnirudh Venkataramanan int ice_stop(struct net_device *netdev) 5178cdedef59SAnirudh Venkataramanan { 5179cdedef59SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 5180cdedef59SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 5181cdedef59SAnirudh Venkataramanan 5182cdedef59SAnirudh Venkataramanan ice_vsi_close(vsi); 5183cdedef59SAnirudh Venkataramanan 5184cdedef59SAnirudh Venkataramanan return 0; 5185cdedef59SAnirudh Venkataramanan } 5186cdedef59SAnirudh Venkataramanan 5187e94d4478SAnirudh Venkataramanan /** 5188e94d4478SAnirudh Venkataramanan * ice_features_check - Validate encapsulated packet conforms to limits 5189e94d4478SAnirudh Venkataramanan * @skb: skb buffer 5190e94d4478SAnirudh Venkataramanan * @netdev: This port's netdev 5191e94d4478SAnirudh Venkataramanan * @features: Offload features that the stack believes apply 5192e94d4478SAnirudh Venkataramanan */ 5193e94d4478SAnirudh Venkataramanan static netdev_features_t 5194e94d4478SAnirudh Venkataramanan ice_features_check(struct sk_buff *skb, 5195e94d4478SAnirudh Venkataramanan struct net_device __always_unused *netdev, 5196e94d4478SAnirudh Venkataramanan netdev_features_t features) 5197e94d4478SAnirudh Venkataramanan { 5198e94d4478SAnirudh Venkataramanan size_t len; 5199e94d4478SAnirudh Venkataramanan 5200e94d4478SAnirudh Venkataramanan /* No point in doing any of this if neither checksum nor GSO are 5201e94d4478SAnirudh Venkataramanan * being requested for this frame. 
We can rule out both by just 5202e94d4478SAnirudh Venkataramanan * checking for CHECKSUM_PARTIAL 5203e94d4478SAnirudh Venkataramanan */ 5204e94d4478SAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 5205e94d4478SAnirudh Venkataramanan return features; 5206e94d4478SAnirudh Venkataramanan 5207e94d4478SAnirudh Venkataramanan /* We cannot support GSO if the MSS is going to be less than 5208e94d4478SAnirudh Venkataramanan * 64 bytes. If it is then we need to drop support for GSO. 5209e94d4478SAnirudh Venkataramanan */ 5210e94d4478SAnirudh Venkataramanan if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 5211e94d4478SAnirudh Venkataramanan features &= ~NETIF_F_GSO_MASK; 5212e94d4478SAnirudh Venkataramanan 5213e94d4478SAnirudh Venkataramanan len = skb_network_header(skb) - skb->data; 5214e94d4478SAnirudh Venkataramanan if (len & ~(ICE_TXD_MACLEN_MAX)) 5215e94d4478SAnirudh Venkataramanan goto out_rm_features; 5216e94d4478SAnirudh Venkataramanan 5217e94d4478SAnirudh Venkataramanan len = skb_transport_header(skb) - skb_network_header(skb); 5218e94d4478SAnirudh Venkataramanan if (len & ~(ICE_TXD_IPLEN_MAX)) 5219e94d4478SAnirudh Venkataramanan goto out_rm_features; 5220e94d4478SAnirudh Venkataramanan 5221e94d4478SAnirudh Venkataramanan if (skb->encapsulation) { 5222e94d4478SAnirudh Venkataramanan len = skb_inner_network_header(skb) - skb_transport_header(skb); 5223e94d4478SAnirudh Venkataramanan if (len & ~(ICE_TXD_L4LEN_MAX)) 5224e94d4478SAnirudh Venkataramanan goto out_rm_features; 5225e94d4478SAnirudh Venkataramanan 5226e94d4478SAnirudh Venkataramanan len = skb_inner_transport_header(skb) - 5227e94d4478SAnirudh Venkataramanan skb_inner_network_header(skb); 5228e94d4478SAnirudh Venkataramanan if (len & ~(ICE_TXD_IPLEN_MAX)) 5229e94d4478SAnirudh Venkataramanan goto out_rm_features; 5230e94d4478SAnirudh Venkataramanan } 5231e94d4478SAnirudh Venkataramanan 5232e94d4478SAnirudh Venkataramanan return features; 5233e94d4478SAnirudh Venkataramanan 
out_rm_features: 5234e94d4478SAnirudh Venkataramanan return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 5235e94d4478SAnirudh Venkataramanan } 5236e94d4478SAnirudh Venkataramanan 5237462acf6aSTony Nguyen static const struct net_device_ops ice_netdev_safe_mode_ops = { 5238462acf6aSTony Nguyen .ndo_open = ice_open, 5239462acf6aSTony Nguyen .ndo_stop = ice_stop, 5240462acf6aSTony Nguyen .ndo_start_xmit = ice_start_xmit, 5241462acf6aSTony Nguyen .ndo_set_mac_address = ice_set_mac_address, 5242462acf6aSTony Nguyen .ndo_validate_addr = eth_validate_addr, 5243462acf6aSTony Nguyen .ndo_change_mtu = ice_change_mtu, 5244462acf6aSTony Nguyen .ndo_get_stats64 = ice_get_stats64, 5245462acf6aSTony Nguyen .ndo_tx_timeout = ice_tx_timeout, 5246462acf6aSTony Nguyen }; 5247462acf6aSTony Nguyen 5248cdedef59SAnirudh Venkataramanan static const struct net_device_ops ice_netdev_ops = { 5249cdedef59SAnirudh Venkataramanan .ndo_open = ice_open, 5250cdedef59SAnirudh Venkataramanan .ndo_stop = ice_stop, 52512b245cb2SAnirudh Venkataramanan .ndo_start_xmit = ice_start_xmit, 5252e94d4478SAnirudh Venkataramanan .ndo_features_check = ice_features_check, 5253e94d4478SAnirudh Venkataramanan .ndo_set_rx_mode = ice_set_rx_mode, 5254e94d4478SAnirudh Venkataramanan .ndo_set_mac_address = ice_set_mac_address, 5255e94d4478SAnirudh Venkataramanan .ndo_validate_addr = eth_validate_addr, 5256e94d4478SAnirudh Venkataramanan .ndo_change_mtu = ice_change_mtu, 5257fcea6f3dSAnirudh Venkataramanan .ndo_get_stats64 = ice_get_stats64, 52581ddef455SUsha Ketineni .ndo_set_tx_maxrate = ice_set_tx_maxrate, 52597c710869SAnirudh Venkataramanan .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 52607c710869SAnirudh Venkataramanan .ndo_set_vf_mac = ice_set_vf_mac, 52617c710869SAnirudh Venkataramanan .ndo_get_vf_config = ice_get_vf_cfg, 52627c710869SAnirudh Venkataramanan .ndo_set_vf_trust = ice_set_vf_trust, 52637c710869SAnirudh Venkataramanan .ndo_set_vf_vlan = ice_set_vf_port_vlan, 52647c710869SAnirudh Venkataramanan 
.ndo_set_vf_link_state = ice_set_vf_link_state, 5265730fdea4SJesse Brandeburg .ndo_get_vf_stats = ice_get_vf_stats, 5266d76a60baSAnirudh Venkataramanan .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 5267d76a60baSAnirudh Venkataramanan .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 5268d76a60baSAnirudh Venkataramanan .ndo_set_features = ice_set_features, 5269b1edc14aSMd Fahad Iqbal Polash .ndo_bridge_getlink = ice_bridge_getlink, 5270b1edc14aSMd Fahad Iqbal Polash .ndo_bridge_setlink = ice_bridge_setlink, 5271e94d4478SAnirudh Venkataramanan .ndo_fdb_add = ice_fdb_add, 5272e94d4478SAnirudh Venkataramanan .ndo_fdb_del = ice_fdb_del, 5273b3969fd7SSudheer Mogilappagari .ndo_tx_timeout = ice_tx_timeout, 5274efc2214bSMaciej Fijalkowski .ndo_bpf = ice_xdp, 5275efc2214bSMaciej Fijalkowski .ndo_xdp_xmit = ice_xdp_xmit, 52762d4238f5SKrzysztof Kazimierczak .ndo_xsk_wakeup = ice_xsk_wakeup, 5277cdedef59SAnirudh Venkataramanan }; 5278